diff --git a/.github/workflows/polkadot-companion-labels.yml b/.github/workflows/polkadot-companion-labels.yml
index 27f743e1bd4529dc83ee7dfd5b96c6c5bb4ac3f8..3c3987b5f4d563cfa15f36a87dc588a674b59660 100644
--- a/.github/workflows/polkadot-companion-labels.yml
+++ b/.github/workflows/polkadot-companion-labels.yml
@@ -16,9 +16,10 @@ jobs:
           ref: ${{ github.event.pull_request.head.sha }}
           contexts: 'continuous-integration/gitlab-check-polkadot-companion-build'
           timeout: 1800
-          notPresentTimeout: 3600 # It can take quite a while before the job starts...
+          notPresentTimeout: 3600 # It can take quite a while before the job starts on Gitlab when the CI queue is large
           failureStates: failure
           interruptedStates: error # Error = job was probably cancelled. We don't want to label the PR in that case
+          pollInterval: 30
       - name: Label success
         uses: andymckay/labeler@master
         if: steps.check-companion-status.outputs.result == 'success'
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index c97d68bab0030484317757c9a69737430c80d5d8..3ec5b007119c631d08df7b9dd8785f78ddb82cf5 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -13,7 +13,7 @@
 # image: paritytech/tools:latest # Any docker image (required)
 # allow_failure: true # Allow the pipeline to continue if this job fails (default: false)
 # dependencies:
-# - build-rust-doc-release # Any jobs that are required to run before this job (optional)
+# - build-rust-doc # Any jobs that are required to run before this job (optional)
 # variables:
 # MY_ENVIRONMENT_VARIABLE: "some useful value" # Environment variables passed to the job (optional)
 # script:
@@ -193,6 +193,9 @@ cargo-deny:
     - schedules
     - tags
     - web
+  except:
+    variables:
+      - $CI_COMMIT_MESSAGE =~ /skip-checks/
   script:
     - cargo deny check --hide-inclusion-graph -c .maintain/deny.toml
   after_script:
@@ -273,6 +276,9 @@ unleash-check:
   only:
     - master
     - tags
+  except:
+    variables:
+      - $CI_COMMIT_MESSAGE =~ /skip-checks/
   script:
     - cargo install cargo-unleash ${CARGO_UNLEASH_INSTALL_PARAMS}
     - cargo unleash check ${CARGO_UNLEASH_PKG_DEF}
@@ -336,6 +342,9 @@ check-web-wasm:
     - time cargo build --target=wasm32-unknown-unknown -p sc-telemetry
    # Note: the command below is a bit weird because several Cargo issues prevent us from compiling the node in a more straight-forward way.
    - time cargo +nightly build --manifest-path=bin/node/cli/Cargo.toml --no-default-features --features browser --target=wasm32-unknown-unknown -Z features=itarget
+    # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases
+    - time cargo +nightly test --manifest-path primitives/tracing/Cargo.toml --no-default-features
+    - time cargo +nightly test --manifest-path primitives/tracing/Cargo.toml --no-default-features --features=with-tracing
     - sccache -s

 test-full-crypto-feature:
@@ -476,23 +485,25 @@ build-macos-subkey:
   tags:
     - osx

-build-rust-doc-release:
+build-rust-doc:
   stage: build
   <<: *docker-env
   <<: *docker-env-only
   allow_failure: true
+  variables:
+    <<: *default-vars
+    RUSTFLAGS: -Dwarnings
   artifacts:
     name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc"
     when: on_success
     expire_in: 7 days
     paths:
-      - ./crate-docs
-  <<: *build-only
+      - ./crate-docs/
   script:
     - rm -f ./crate-docs/index.html # use it as an indicator if the job succeeds
     - BUILD_DUMMY_WASM_BINARY=1 RUSTDOCFLAGS="--html-in-header $(pwd)/.maintain/rustdoc-header.html"
-        time cargo +nightly doc --release --all --verbose
-    - cp -R ./target/doc ./crate-docs
+        time cargo +nightly doc --no-deps --workspace --all-features --verbose
+    - mv ./target/doc ./crate-docs
     - echo "" > ./crate-docs/index.html
     - sccache -s
@@ -520,7 +531,7 @@ docker-build-chaos: &docker-build-chaos
   needs:
     - job: build-linux-substrate
   image: docker:stable
-  tags: 
+  tags:
     - kubernetes-parity-build
   variables:
     <<: *default-vars
@@ -559,7 +570,7 @@ chaos-test-singlenodeheight:
   image: parity/chaostools:latest
   needs:
     - job: docker-build-chaos
-  tags: 
+  tags:
     - parity-chaos
   variables:
     <<: *default-vars
@@ -670,7 +681,7 @@ publish-s3-doc:
   image: paritytech/awscli:latest
   allow_failure: true
   needs:
-    - job: build-rust-doc-release
+    - job: build-rust-doc
       artifacts: true
   <<: *build-only
   <<: *kubernetes-build
@@ -734,7 +745,8 @@ deploy-kubernetes-alerting-rules:
       refs:
         - master
       changes:
-        - "${RULES}"
+        - .gitlab-ci.yml
+        - .maintain/monitoring/
diff --git a/.maintain/chaostest/package-lock.json b/.maintain/chaostest/package-lock.json
index 8855f221a133d14040312ce255bbf5ced3c27183..09468e12fb4f9ca72d8d6e352960464d4b2502ab 100644
--- a/.maintain/chaostest/package-lock.json
+++ b/.maintain/chaostest/package-lock.json
@@ -941,9 +941,9 @@
       }
     },
     "bl": {
-      "version": "4.0.2",
-      "resolved": "https://registry.npmjs.org/bl/-/bl-4.0.2.tgz",
-      "integrity": "sha512-j4OH8f6Qg2bGuWfRiltT2HYGx0e1QcBTrK9KAHNMwMZdQnDZFk0ZSYIpADjYCB3U12nicC5tVJwSIhwOWjb4RQ==",
+      "version": "4.0.3",
+      "resolved": "https://registry.npmjs.org/bl/-/bl-4.0.3.tgz",
+      "integrity": "sha512-fs4G6/Hu4/EE+F75J8DuN/0IpQqNjAdC7aEQv7Qt8MHGUH7Ckv2MwTEEeN9QehD0pfIDkMI1bkHYkKy7xHyKIg==",
       "dev": true,
       "requires": {
         "buffer": "^5.5.0",
@@ -3836,9 +3836,9 @@
       }
     },
     "lodash": {
-      "version": "4.17.15",
-      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
-      "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A=="
+      "version": "4.17.20",
+      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz",
+      "integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA=="
     },
     "lodash._reinterpolate": {
       "version": "3.0.0",
diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh
index b78c26dea8458b0bcc7c172d163d31be68354b90..219af5001b0537b40df60a5a8f77f2f939f97d0a 100755
--- a/.maintain/gitlab/check_polkadot_companion_build.sh
+++ b/.maintain/gitlab/check_polkadot_companion_build.sh
@@ -1,9 +1,9 @@
-#!/bin/sh
+#!/usr/bin/env sh
 #
-# check if a pr is compatible with polkadot companion pr or master if not 
+# check if a pr is compatible with polkadot companion pr or master if not
 # available
 #
-# to override one that was just mentioned mark companion pr in the body of the 
+# to override one that was just mentioned mark companion pr in the body of the
 # polkadot pr like
 #
 # polkadot companion: paritytech/polkadot#567
@@ -12,7 +12,7 @@
 github_api_substrate_pull_url="https://api.github.com/repos/paritytech/substrate/pulls"
 # use github api v3 in order to access the data without authentication
-github_header="Authorization: token ${GITHUB_PR_TOKEN}" 
+github_header="Authorization: token ${GITHUB_PR_TOKEN}"

 boldprint () { printf "|\n| \033[1m${@}\033[0m\n|\n" ; }
 boldcat () { printf "|\n"; while read l; do printf "| \033[1m${l}\033[0m\n"; done; printf "|\n" ; }
@@ -40,7 +40,7 @@ EOT
 git config --global user.name 'CI system'
 git config --global user.email '<>'

-SUBSTRATE_PATH=$(pwd)
+cargo install -f --version 0.2.0 diener

 # Merge master into our branch before building Polkadot to make sure we don't miss
 # any commits that are required by Polkadot.
@@ -85,14 +85,10 @@ else
   boldprint "this is not a pull request - building polkadot:master"
 fi

-# Make sure we override the crates in native and wasm build
-# patching the git path as described in the link below did not test correctly
-# https://doc.rust-lang.org/cargo/reference/overriding-dependencies.html
-mkdir .cargo
-echo "paths = [ \"$SUBSTRATE_PATH\" ]" > .cargo/config
-
-mkdir -p target/debug/wbuild/.cargo
-cp .cargo/config target/debug/wbuild/.cargo/config
+cd ..
+$CARGO_HOME/bin/diener --substrate --branch $CI_COMMIT_REF_NAME --git https://gitlab.parity.io/parity/substrate.git --path polkadot
+cd polkadot

 # Test Polkadot pr or master branch with this Substrate commit.
+cargo update -p sp-io
 time cargo test --all --release --verbose
diff --git a/.maintain/monitoring/alerting-rules/alerting-rules.yaml b/.maintain/monitoring/alerting-rules/alerting-rules.yaml
index 12f46e17ad85b3b29ea169a2b3ac861c7118b6a3..7f36fedb4ba678041a4cc375e3ddef9d22956993 100644
--- a/.maintain/monitoring/alerting-rules/alerting-rules.yaml
+++ b/.maintain/monitoring/alerting-rules/alerting-rules.yaml
@@ -109,6 +109,19 @@ groups:
           message: 'The node {{ $labels.instance }} has less than 3 peers for more
           than 15 minutes'

+      ##############################################################################
+      # System
+      ##############################################################################
+
+      - alert: HighNumberOfFileDescriptors
+        expr: 'node_filefd_allocated{domain=~"kusama|polkadot"} > 10000'
+        for: 3m
+        labels:
+          severity: warning
+        annotations:
+          message: 'The node {{ $labels.instance }} has more than 10_000 file
+          descriptors allocated for more than 3 minutes'
+
       ##############################################################################
       # Others
       ##############################################################################
diff --git a/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json b/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json
index 245071c210bfc48d815b6f963f3d51fbc4dd61b4..539fdec086a370367176fa4dc6681a61d876385e 100644
--- a/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json
+++ b/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json
@@ -44,14 +44,14 @@
       {
         "datasource": "$data_source",
         "enable": true,
-        "expr": "increase(${metric_namespace}_tasks_ended_total{reason=\"panic\", instance=~\"${nodename}\"}[5m])",
+        "expr": "increase(${metric_namespace}_tasks_ended_total{reason=\"panic\", instance=~\"${nodename}\"}[10m])",
         "hide": true,
         "iconColor": "rgba(255, 96, 96, 1)",
         "limit": 100,
         "name": "Task panics",
         "rawQuery": "SELECT\n extract(epoch from time_column) AS time,\n text_column as text,\n tags_column as tags\nFROM\n metric_table\nWHERE\n $__timeFilter(time_column)\n",
         "showIn": 0,
-        "step": "",
+        "step": "10m",
         "tags": [],
         "textFormat": "{{instance}} - {{task_name}}",
         "titleFormat": "Panic!",
@@ -60,12 +60,12 @@
       {
         "datasource": "$data_source",
         "enable": true,
-        "expr": "changes(${metric_namespace}_process_start_time_seconds{instance=~\"${nodename}\"}[5m])",
+        "expr": "changes(${metric_namespace}_process_start_time_seconds{instance=~\"${nodename}\"}[10m])",
         "hide": false,
         "iconColor": "#8AB8FF",
         "name": "Node reboots",
         "showIn": 0,
-        "step": "",
+        "step": "10m",
         "textFormat": "{{instance}}",
         "titleFormat": "Reboots"
       }
@@ -75,7 +75,7 @@
   "gnetId": null,
   "graphTooltip": 0,
   "id": null,
-  "iteration": 1594822742772,
+  "iteration": 1599471940817,
   "links": [],
   "panels": [
     {
@@ -87,9 +87,9 @@
         "x": 0,
         "y": 0
       },
-      "id": 25,
+      "id": 29,
       "panels": [],
-      "title": "CPU & Memory",
+      "title": "Tasks",
       "type": "row"
     },
     {
@@ -107,130 +107,23 @@
         "y": 1
       },
       "hiddenSeries": false,
-      "id": 9,
+      "id": 11,
+      "interval": "1m",
       "legend": {
-        "avg": false,
+        "alignAsTable": true,
+        "avg": true,
         "current": false,
+        "hideEmpty": false,
+        "hideZero": false,
         "max": false,
         "min": false,
-        "show": false,
-        "total": false,
-        "values": false
-      },
-      "lines": true,
-      "linewidth": 1,
-      "nullPointMode": "null",
-      "options": {
-        "dataLinks": []
-      },
-      "percentage": false,
-      "pointradius": 2,
-      "points": false,
-      "renderer": "flot",
-      "seriesOverrides": [
-        {
-          "alias": "stddev-above",
-          "fillBelowTo": "stddev-below",
-          "hideTooltip":
true, - "lines": false - }, - { - "alias": "stddev-below", - "hideTooltip": true, - "lines": false - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "avg(${metric_namespace}_cpu_usage_percentage{instance=~\"${nodename}\"})", - "interval": "", - "legendFormat": "cpu-usage", - "refId": "A" - }, - { - "expr": "avg(${metric_namespace}_cpu_usage_percentage{instance=~\"${nodename}\"}) - stddev(${metric_namespace}_cpu_usage_percentage{instance=~\"${nodename}\"})", - "interval": "", - "legendFormat": "stddev-below", - "refId": "B" - }, - { - "expr": "avg(${metric_namespace}_cpu_usage_percentage{instance=~\"${nodename}\"}) + stddev(${metric_namespace}_cpu_usage_percentage{instance=~\"${nodename}\"})", - "interval": "", - "legendFormat": "stddev-above", - "refId": "C" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average CPU usage and standard deviation", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, + "rightSide": true, "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percent", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "fill": 0, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 7 - }, - "hiddenSeries": false, - "id": 20, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, "total": false, - "values": false + "values": true }, "lines": true, - "linewidth": 1, + "linewidth": 2, "nullPointMode": "null", "options": { "dataLinks": [] @@ -242,12 +135,12 @@ "seriesOverrides": [], "spaceLength": 10, "stack": false, - "steppedLine": false, + "steppedLine": true, "targets": [ { - "expr": "${metric_namespace}_memory_usage_bytes{instance=~\"${nodename}\"}", + "expr": "avg(irate(${metric_namespace}_tasks_polling_duration_sum{instance=~\"${nodename}\"}[10m])) by (task_name)", "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{task_name}}", "refId": "A" } ], @@ -255,7 +148,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Memory usage", + "title": "CPU time spent on each task (average per node)", "tooltip": { "shared": true, "sort": 2, @@ -271,7 +164,7 @@ }, "yaxes": [ { - "format": "decbytes", + "format": "percentunit", "label": null, "logBase": 1, "max": null, @@ -292,20 +185,6 @@ "alignLevel": null } }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 13 - }, - "id": 29, - "panels": [], - "title": "Tasks", - "type": "row" - }, { "aliasColors": {}, "bars": false, @@ -318,19 +197,23 @@ "h": 6, "w": 24, "x": 0, - "y": 14 + "y": 7 }, "hiddenSeries": false, - "id": 11, - "interval": "1m", + "id": 30, + "interval": "", "legend": { - "avg": false, + "alignAsTable": true, + "avg": true, "current": false, + "hideEmpty": false, + "hideZero": false, "max": false, "min": false, - "show": false, + "rightSide": true, + "show": true, "total": false, - "values": false + "values": true }, "lines": true, "linewidth": 2, @@ -348,7 +231,7 @@ "steppedLine": true, "targets": [ { - "expr": 
"avg(increase(${metric_namespace}_tasks_polling_duration_sum{instance=~\"${nodename}\"}[$__interval])) by (task_name) * 1000 / $__interval_ms", + "expr": "avg(irate(${metric_namespace}_tasks_polling_duration_count{instance=~\"${nodename}\"}[10m])) by (task_name)", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -358,7 +241,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "CPU time spent on each task (average per node)", + "title": "Task polling rate per second (average per node)", "tooltip": { "shared": true, "sort": 2, @@ -374,7 +257,7 @@ }, "yaxes": [ { - "format": "percentunit", + "format": "cps", "label": null, "logBase": 1, "max": null, @@ -407,16 +290,16 @@ "h": 6, "w": 24, "x": 0, - "y": 20 + "y": 13 }, "hiddenSeries": false, - "id": 30, + "id": 31, "interval": "", "legend": { "alignAsTable": true, - "avg": true, + "avg": false, "current": false, - "max": false, + "max": true, "min": false, "rightSide": true, "show": true, @@ -439,7 +322,7 @@ "steppedLine": true, "targets": [ { - "expr": "avg(rate(${metric_namespace}_tasks_polling_duration_count{instance=~\"${nodename}\"}[5m])) by (task_name)", + "expr": "max(irate(${metric_namespace}_tasks_polling_duration_count{instance=~\"${nodename}\"}[10m])) by (task_name)", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -449,7 +332,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Task polling rate per second (average per node)", + "title": "Task polling rate per second (maximum per node)", "tooltip": { "shared": true, "sort": 2, @@ -498,16 +381,16 @@ "h": 6, "w": 24, "x": 0, - "y": 26 + "y": 19 }, "hiddenSeries": false, - "id": 31, + "id": 15, "interval": "", "legend": { "alignAsTable": true, - "avg": false, + "avg": true, "current": false, - "max": true, + "max": false, "min": false, "rightSide": true, "show": true, @@ -515,8 +398,8 @@ "values": true }, "lines": true, - "linewidth": 2, - "nullPointMode": "null", + "linewidth": 1, + "nullPointMode": "null as zero", "options": { "dataLinks": [] }, @@ -530,7 +413,7 @@ "steppedLine": true, "targets": [ { - "expr": "max(rate(${metric_namespace}_tasks_polling_duration_count{instance=~\"${nodename}\"}[5m])) by (task_name)", + "expr": "avg by(task_name) (irate(${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}[10m]))", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -540,7 +423,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Task polling rate per second (maximum per node)", + "title": "Number of tasks started per second (average per node)", "tooltip": { "shared": true, "sort": 2, @@ -556,11 +439,11 @@ }, "yaxes": [ { - "format": "cps", + "format": "short", "label": null, - "logBase": 1, + "logBase": 10, "max": null, - "min": null, + "min": "0", "show": true }, { @@ -569,7 +452,7 @@ "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { @@ -589,21 +472,21 @@ "h": 6, "w": 24, "x": 0, - "y": 32 + "y": 25 }, "hiddenSeries": false, - "id": 15, + "id": 16, "interval": "", "legend": { - "alignAsTable": false, + "alignAsTable": true, "avg": false, "current": false, - "max": false, + "max": true, "min": false, - "rightSide": false, - "show": false, + "rightSide": true, + "show": true, "total": false, - "values": false + "values": true }, "lines": true, "linewidth": 1, @@ -621,7 +504,7 @@ "steppedLine": true, "targets": [ { - "expr": "avg by(task_name) (irate(${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}[5m]))", + 
"expr": "max by(task_name) (irate(${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}[10m]))", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -631,7 +514,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of tasks started per second (average per node)", + "title": "Number of tasks started per second (maximum over all nodes)", "tooltip": { "shared": true, "sort": 2, @@ -680,21 +563,21 @@ "h": 6, "w": 24, "x": 0, - "y": 38 + "y": 31 }, "hiddenSeries": false, - "id": 16, + "id": 2, "interval": "", "legend": { - "alignAsTable": false, - "avg": false, + "alignAsTable": true, + "avg": true, "current": false, "max": false, "min": false, - "rightSide": false, - "show": false, + "rightSide": true, + "show": true, "total": false, - "values": false + "values": true }, "lines": true, "linewidth": 1, @@ -712,7 +595,7 @@ "steppedLine": true, "targets": [ { - "expr": "max by(task_name) (irate(${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}[5m]))", + "expr": "avg by(task_name) (${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"} - sum(${metric_namespace}_tasks_ended_total{instance=~\"${nodename}\"}) without(reason))", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -722,7 +605,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of tasks started per second (maximum over all nodes)", + "title": "Number of tasks running (average per node)", "tooltip": { "shared": true, "sort": 2, @@ -771,21 +654,21 @@ "h": 6, "w": 24, "x": 0, - "y": 44 + "y": 37 }, "hiddenSeries": false, - "id": 2, + "id": 3, "interval": "", "legend": { - "alignAsTable": false, + "alignAsTable": true, "avg": false, "current": false, - "max": false, + "max": true, "min": false, - "rightSide": false, - "show": false, + "rightSide": true, + "show": true, "total": false, - "values": false + "values": true }, "lines": true, "linewidth": 1, @@ -803,7 +686,7 @@ "steppedLine": true, "targets": [ { - "expr": "avg by(task_name) (${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"} - sum(${metric_namespace}_tasks_ended_total{instance=~\"${nodename}\"}) without(reason))", + "expr": "max by(task_name) (${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"} - sum(${metric_namespace}_tasks_ended_total{instance=~\"${nodename}\"}) without(reason))", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -813,7 +696,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of tasks running (average per node)", + "title": "Number of tasks running (maximum over all nodes)", "tooltip": { "shared": true, "sort": 2, @@ -856,27 +739,30 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "decimals": null, "fill": 0, "fillGradient": 0, "gridPos": { "h": 6, "w": 24, "x": 0, - "y": 50 + "y": 43 }, "hiddenSeries": false, - "id": 3, + "id": 7, "interval": "", "legend": { - "alignAsTable": false, - "avg": false, + "alignAsTable": true, + "avg": true, "current": false, + "hideEmpty": true, + "hideZero": true, "max": false, "min": false, - "rightSide": false, - "show": false, + "rightSide": true, + "show": true, "total": false, - "values": false + "values": true }, "lines": true, "linewidth": 1, @@ -890,11 +776,11 @@ "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, - "stack": false, + "stack": true, "steppedLine": true, "targets": [ { - "expr": "max by(task_name) (${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"} - 
sum(${metric_namespace}_tasks_ended_total{instance=~\"${nodename}\"}) without(reason))", + "expr": "avg(\n irate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"+Inf\"}[10m])\n - ignoring(le)\n irate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"1.024\"}[10m])\n) by (task_name) > 0", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -904,11 +790,11 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of tasks running (maximum over all nodes)", + "title": "Calls to `Future::poll` that took more than one second (average per node)", "tooltip": { "shared": true, "sort": 2, - "value_type": "individual" + "value_type": "cumulative" }, "type": "graph", "xaxis": { @@ -920,9 +806,10 @@ }, "yaxes": [ { - "format": "short", - "label": null, - "logBase": 10, + "decimals": null, + "format": "cps", + "label": "Calls to `Future::poll`/second", + "logBase": 1, "max": null, "min": "0", "show": true @@ -933,7 +820,7 @@ "logBase": 1, "max": null, "min": null, - "show": true + "show": false } ], "yaxis": { @@ -941,6 +828,20 @@ "alignLevel": null } }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 49 + }, + "id": 27, + "panels": [], + "title": "Misc", + "type": "row" + }, { "aliasColors": {}, "bars": false, @@ -950,21 +851,18 @@ "fill": 0, "fillGradient": 0, "gridPos": { - "h": 6, + "h": 7, "w": 24, "x": 0, - "y": 56 + "y": 50 }, "hiddenSeries": false, - "id": 7, - "interval": "", + "id": 32, "legend": { "alignAsTable": true, "avg": true, "current": false, - "hideEmpty": true, - "hideZero": true, - "max": false, + "max": true, "min": false, "rightSide": true, "show": true, @@ -973,7 +871,7 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "null", "options": { "dataLinks": [] }, @@ -983,25 +881,25 @@ "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, - "stack": true, - "steppedLine": true, + "stack": false, + "steppedLine": false, "targets": [ { - "expr": "avg(\n rate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"+Inf\"}[1m])\n - ignoring(le)\n rate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"1.024\"}[1m])\n) by (task_name) > 0", + "expr": "avg(${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"send\"} - ignoring(action) ${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"received\"}) by (entity)", "interval": "", - "legendFormat": "{{task_name}}", - "refId": "A" + "legendFormat": "{{entity}}", + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Calls to `Future::poll` that took more than one second (average per node)", + "title": "Unbounded channels size (average per node)", "tooltip": { "shared": true, "sort": 2, - "value_type": "cumulative" + "value_type": "individual" }, "type": "graph", "xaxis": { @@ -1013,11 +911,11 @@ }, "yaxes": [ { - "format": "cps", - "label": "Calls to `Future::poll`/second", + "format": "short", + "label": null, "logBase": 1, "max": null, - "min": "0", + "min": null, "show": true }, { @@ -1034,20 +932,6 @@ "alignLevel": null } }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 62 - }, - "id": 27, - "panels": [], - "title": "Misc", - "type": "row" - }, { "aliasColors": {}, "bars": false, @@ -1060,18 +944,20 @@ "h": 7, "w": 24, "x": 0, - 
"y": 63 + "y": 57 }, "hiddenSeries": false, - "id": 23, + "id": 33, "legend": { - "avg": false, + "alignAsTable": true, + "avg": true, "current": false, "max": false, "min": false, - "show": false, + "rightSide": true, + "show": true, "total": false, - "values": false + "values": true }, "lines": true, "linewidth": 1, @@ -1089,17 +975,17 @@ "steppedLine": false, "targets": [ { - "expr": "${metric_namespace}_threads{instance=~\"${nodename}\"}", + "expr": "avg(irate(${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"send\"}[10m])) by (entity)", "interval": "", - "legendFormat": "{{instance}}", - "refId": "A" + "legendFormat": "{{entity}}", + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of threads", + "title": "Unbounded channels rate (average per node)", "tooltip": { "shared": true, "sort": 2, @@ -1115,7 +1001,7 @@ }, "yaxes": [ { - "format": "short", + "format": "cps", "label": null, "logBase": 1, "max": null, @@ -1137,7 +1023,7 @@ } } ], - "refresh": "30s", + "refresh": false, "schemaVersion": 22, "style": "dark", "tags": [], @@ -1147,7 +1033,7 @@ "allValue": null, "current": {}, "datasource": "$data_source", - "definition": "${metric_namespace}_cpu_usage_percentage", + "definition": "${metric_namespace}_process_start_time_seconds", "hide": 0, "includeAll": true, "index": -1, @@ -1155,7 +1041,7 @@ "multi": true, "name": "nodename", "options": [], - "query": "${metric_namespace}_cpu_usage_percentage", + "query": "${metric_namespace}_process_start_time_seconds", "refresh": 1, "regex": "/instance=\"(.*?)\"/", "skipUrlSync": false, @@ -1228,5 +1114,5 @@ "variables": { "list": [] }, - "version": 44 + "version": 52 } diff --git a/Cargo.lock b/Cargo.lock index ec5af8aca4ecf42b9d8f8751f26f112e8d9222b0..3a661ed15d242174a2646953ce3f101d12381ee7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -25,12 +25,6 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" -[[package]] -name = "adler32" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2" - [[package]] name = "aead" version = "0.3.2" @@ -46,23 +40,11 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7001367fde4c768a19d1029f0a8be5abd9308e1119846d5bd9ad26297b8faf5" dependencies = [ - "aes-soft 0.4.0", - "aesni 0.7.0", + "aes-soft", + "aesni", "block-cipher", ] -[[package]] -name = "aes-ctr" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2e5b0458ea3beae0d1d8c0f3946564f8e10f90646cf78c06b4351052058d1ee" -dependencies = [ - "aes-soft 0.3.3", - "aesni 0.6.0", - "ctr", - "stream-cipher 0.3.2", -] - [[package]] name = "aes-gcm" version = "0.6.0" @@ -73,18 +55,7 @@ dependencies = [ "aes", "block-cipher", "ghash", - "subtle 2.2.2", -] - -[[package]] -name = "aes-soft" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfd7e7ae3f9a1fb5c03b389fc6bb9a51400d0c13053f0dca698c832bfd893a0d" -dependencies = [ - "block-cipher-trait", - "byteorder", - "opaque-debug 0.2.3", + "subtle 2.2.3", ] [[package]] @@ -94,19 +65,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4925647ee64e5056cf231608957ce7c81e12d6d6e316b9ce1404778cc1d35fa7" dependencies = [ "block-cipher", - "byteorder", - 
"opaque-debug 0.2.3", -] - -[[package]] -name = "aesni" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f70a6b5f971e473091ab7cfb5ffac6cde81666c4556751d8d5620ead8abf100" -dependencies = [ - "block-cipher-trait", + "byteorder 1.3.4", "opaque-debug 0.2.3", - "stream-cipher 0.3.2", ] [[package]] @@ -136,9 +96,9 @@ checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" [[package]] name = "aho-corasick" -version = "0.7.10" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" +checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" dependencies = [ "memchr", ] @@ -160,7 +120,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -169,14 +129,14 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "anyhow" -version = "1.0.28" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a60d744a80c30fcb657dfe2c1b22bcb3e814c1a1e3674f32bf5820b570fbff" +checksum = "85bb70cc08ec97ca5450e6eba421deeea5f172c0fc61f78b5357b2a8e8be195f" [[package]] name = "approx" @@ -189,15 +149,15 @@ dependencies = [ [[package]] name = "arbitrary" -version = "0.4.1" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75153c95fdedd7db9732dfbfc3702324a1627eec91ba56e37cd0ac78314ab2ed" +checksum = "7cb544f1057eaaff4b34f8c4dcf56fc3cd04debd291998405d135017a7c3c0f4" [[package]] name = "arc-swap" -version = "0.4.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d663a8e9a99154b5fb793032533f6328da35e23aac63d5c152279aa8ba356825" +checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" [[package]] name = "arrayref" @@ -258,6 +218,17 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5" +[[package]] +name = "async-channel" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee81ba99bee79f3c8ae114ae4baa7eaa326f63447cf2ec65e4393618b63f8770" +dependencies = [ + "concurrent-queue", + "event-listener", + "futures-core", +] + [[package]] name = "async-std" version = "1.6.2" @@ -274,7 +245,7 @@ dependencies = [ "log", "memchr", "num_cpus", - "once_cell", + "once_cell 1.4.0", "pin-project-lite", "pin-utils", "slab", @@ -300,12 +271,29 @@ dependencies = [ "webpki-roots 0.19.0", ] +[[package]] +name = "async-trait" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caae68055714ff28740f310927e04f2eba76ff580b16fb18ed90073ee71646f7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "atomic" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64f46ca51dca4837f1520754d1c8c36636356b81553d928dc9c177025369a06e" +[[package]] +name = "atomic-waker" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" + [[package]] name = "atty" version = "0.2.14" @@ -314,7 +302,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -338,7 +326,7 @@ dependencies = [ "addr2line", "cfg-if", "libc", - "miniz_oxide 0.4.0", + "miniz_oxide", "object 0.20.0", "rustc-demangle", ] @@ -363,26 +351,26 @@ checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" [[package]] name = "bincode" -version = "1.2.1" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5753e2a71534719bf3f4e57006c3a4f0d2c672a4b676eec84161f763eca87dbf" +checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d" dependencies = [ - "byteorder", + "byteorder 1.3.4", "serde", ] [[package]] name = "bindgen" -version = "0.53.2" +version = "0.54.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb26d6a69a335b8cb0e7c7e9775cd5666611dc50a37177c3f2cedcfc040e8c8" +checksum = "66c0bb6167449588ff70803f4127f0684f9063097eca5016f37eb52b92c2cf36" dependencies = [ "bitflags", "cexpr", "cfg-if", "clang-sys", "clap", - "env_logger 0.7.1", + "env_logger", "lazy_static", "lazycell", "log", @@ -395,6 +383,21 @@ dependencies = [ "which", ] +[[package]] +name = "bip39" +version = "0.6.0-beta.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7059804e226b3ac116519a252d7f5fb985a5ccc0e93255e036a5f7e7283323f4" +dependencies = [ + "failure", + "hashbrown 0.1.8", + "hmac", + "once_cell 0.1.8", + "pbkdf2", + "rand 0.6.5", + "sha2 0.8.2", +] + [[package]] name = "bitflags" version = "1.2.1" @@ -424,7 +427,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84ce5b6108f8e154604bd4eb76a2f726066c3464d5a552a4229262a18c9bb471" dependencies = [ "byte-tools", - "byteorder", + "byteorder 1.3.4", "crypto-mac 0.8.0", "digest 0.9.0", "opaque-debug 0.2.3", @@ -470,7 +473,7 @@ checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ "block-padding", "byte-tools", - "byteorder", + "byteorder 1.3.4", "generic-array 0.12.3", ] @@ -492,15 +495,6 @@ dependencies = [ "generic-array 0.14.3", ] -[[package]] -name = "block-cipher-trait" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c924d49bd09e7c06003acda26cd9742e796e34282ec6c1189404dee0c1f4774" -dependencies = [ - "generic-array 0.12.3", -] - [[package]] name = "block-padding" version = "0.1.5" @@ -512,13 +506,14 @@ dependencies = [ [[package]] name = "blocking" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d17efb70ce4421e351d61aafd90c16a20fb5bfe339fcdc32a86816280e62ce0" +checksum = "d2468ff7bf85066b4a3678fede6fe66db31846d753ff0adfbfab2c6a6e81612b" dependencies = [ - "futures-channel", - "futures-util", - "once_cell", + "async-channel", + "atomic-waker", + "futures-lite", + "once_cell 1.4.0", "parking", "waker-fn", ] @@ -531,9 +526,9 @@ checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" [[package]] name = "bstr" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2889e6d50f394968c8bf4240dc3f2a7eb4680844d27308f798229ac9d4725f41" +checksum = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931" dependencies = [ "lazy_static", 
"memchr", @@ -552,9 +547,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.2.1" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ae9db68ad7fac5fe51304d20f016c911539251075a214f8e663babefa35187" +checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" [[package]] name = "byte-slice-cast" @@ -568,6 +563,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" +[[package]] +name = "byteorder" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855" + [[package]] name = "byteorder" version = "1.3.4" @@ -580,16 +581,16 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" dependencies = [ - "byteorder", + "byteorder 1.3.4", "either", "iovec", ] [[package]] name = "bytes" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" +checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "c_linked_list" @@ -599,17 +600,17 @@ checksum = "4964518bd3b4a8190e832886cdc0da9794f12e8e6c1613a9e90ff331c4c8724b" [[package]] name = "cache-padded" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24508e28c677875c380c20f4d28124fab6f8ed4ef929a1397d7b1a31e92f1005" +checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" [[package]] name = "cargo_metadata" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8de60b887edf6d74370fc8eb177040da4847d971d6234c7b13a6da324ef0caf" +checksum = "052dbdd9db69a339d5fa9ac87bfe2e1319f709119f0345988a597af82bb1011c" dependencies = [ - "semver 0.9.0", + "semver 0.10.0", "serde", "serde_derive", "serde_json", @@ -626,9 +627,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.50" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" +checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" dependencies = [ "jobserver", ] @@ -673,7 +674,7 @@ dependencies = [ [[package]] name = "chain-spec-builder" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "ansi_term 0.12.1", "node-cli", @@ -686,9 +687,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.11" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80094f509cf8b5ae86a4966a39b3ff66cd7e2a3e594accec3743ff3fabeab5b2" +checksum = "c74d84029116787153e02106bf53e66828452a4b325cc8652b788b5967c0a0b6" dependencies = [ "js-sys", "num-integer", @@ -703,16 +704,16 @@ version = "0.29.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe6837df1d5cba2397b835c8530f51723267e16abbf83892e9e5af4f0e5dd10a" dependencies = [ - "glob 0.3.0", + "glob", "libc", "libloading", ] [[package]] name = "clap" -version = "2.33.0" +version = "2.33.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" +checksum = 
"bdfa80d47f954d53a35a64987ca1422f495b8d6483c0fe9f7117b36c2a792129" dependencies = [ "ansi_term 0.11.0", "atty", @@ -733,12 +734,12 @@ dependencies = [ ] [[package]] -name = "cmake" -version = "0.1.42" +name = "cloudabi" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fb25b677f8bf1eb325017cb6bb8452f87969db0fedb4f757b297bee78a7c62" +checksum = "4344512281c643ae7638bbabc3af17a11307803ec8f0fcad9fae512a8bf36467" dependencies = [ - "cc", + "bitflags", ] [[package]] @@ -833,7 +834,7 @@ version = "0.66.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d9badfe36176cb653506091693bc2bb1970c9bddfcd6ec7fac404f7eaec6f38" dependencies = [ - "byteorder", + "byteorder 1.3.4", "cranelift-bforest", "cranelift-codegen-meta", "cranelift-codegen-shared", @@ -921,43 +922,16 @@ dependencies = [ [[package]] name = "criterion" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0363053954f3e679645fc443321ca128b7b950a6fe288cf5f9335cc22ee58394" -dependencies = [ - "atty", - "cast", - "clap", - "criterion-plot 0.3.1", - "csv", - "itertools 0.8.2", - "lazy_static", - "libc", - "num-traits", - "rand_core 0.3.1", - "rand_os", - "rand_xoshiro", - "rayon", - "rayon-core", - "serde", - "serde_derive", - "serde_json", - "tinytemplate", - "walkdir", -] - -[[package]] -name = "criterion" -version = "0.3.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc755679c12bda8e5523a71e4d654b6bf2e14bd838dfc48cde6559a05caf7d1" +checksum = "70daa7ceec6cf143990669a04c7df13391d55fb27bd4079d252fca774ba244d8" dependencies = [ "atty", "cast", "clap", - "criterion-plot 0.4.1", + "criterion-plot", "csv", - "itertools 0.8.2", + "itertools 0.9.0", "lazy_static", "num-traits", "oorandom", @@ -965,6 +939,7 @@ dependencies = [ "rayon", "regex", "serde", + "serde_cbor", "serde_derive", "serde_json", "tinytemplate", @@ -973,23 +948,12 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f9212ddf2f4a9eb2d401635190600656a1f88a932ef53d06e7fa4c7e02fb8e" -dependencies = [ - "byteorder", - "cast", - "itertools 0.8.2", -] - -[[package]] -name = "criterion-plot" -version = "0.4.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01e15e0ea58e8234f96146b1f91fa9d0e4dd7a38da93ff7a75d42c0b9d3a545" +checksum = "e022feadec601fba1649cfa83586381a4ad31c6bf3a9ab7d408118b05dd9889d" dependencies = [ "cast", - "itertools 0.8.2", + "itertools 0.9.0", ] [[package]] @@ -1015,17 +979,18 @@ dependencies = [ "lazy_static", "maybe-uninit", "memoffset", - "scopeguard", + "scopeguard 1.1.0", ] [[package]] name = "crossbeam-queue" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c695eeca1e7173472a32221542ae469b3e9aac3a4fc81f7696bcad82029493db" +checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" dependencies = [ "cfg-if", "crossbeam-utils", + "maybe-uninit", ] [[package]] @@ -1062,7 +1027,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ "generic-array 0.14.3", - "subtle 2.2.2", + "subtle 2.2.3", ] [[package]] @@ -1098,48 +1063,48 @@ dependencies = [ [[package]] name = "ctor" -version = "0.1.13" +version = "0.1.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "47c5e5ac752e18207b12e16b10631ae5f7f68f8805f335f9b817ead83d9ffce1" +checksum = "39858aa5bac06462d4dd4b9164848eb81ffc4aa5c479746393598fd193afa227" dependencies = [ "quote", "syn", ] [[package]] -name = "ctr" +name = "cuckoofilter" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "022cd691704491df67d25d006fe8eca083098253c4d43516c2206479c58c6736" +checksum = "8dd43f7cfaffe0a386636a10baea2ee05cc50df3b77bea4a456c9572a939bf1f" dependencies = [ - "block-cipher-trait", - "stream-cipher 0.3.2", + "byteorder 0.5.3", + "rand 0.3.23", ] [[package]] name = "curve25519-dalek" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26778518a7f6cffa1d25a44b602b62b979bd88adb9e99ffec546998cf3404839" +checksum = "5d85653f070353a16313d0046f173f70d1aadd5b42600a14de626f0dfb3473a5" dependencies = [ - "byteorder", + "byteorder 1.3.4", "digest 0.8.1", "rand_core 0.5.1", - "subtle 2.2.2", + "subtle 2.2.3", "zeroize", ] [[package]] name = "data-encoding" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11c0346158a19b3627234e15596f5e465c360fcdb97d817bcb255e0510f5a788" +checksum = "72aa14c04dfae8dd7d8a2b1cb7ca2152618cd01336dbfe704b8dcbf8d41dbd69" [[package]] name = "derive_more" -version = "0.99.5" +version = "0.99.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2323f3f47db9a0e77ce7a300605d8d2098597fc451ed1a97bb1f6411bb550a7" +checksum = "298998b1cf6b5b2c8a7b023dfd45821825ce3ba8a8af55c921a0e734e4653f76" dependencies = [ "proc-macro2", "quote", @@ -1182,14 +1147,13 @@ dependencies = [ [[package]] name = "dirs-sys" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afa0b23de8fd801745c471deffa6e12d248f962c9fd4b4c33787b055599bde7b" +checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" dependencies = [ - "cfg-if", "libc", "redox_users", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1204,7 +1168,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" dependencies = [ - "byteorder", + "byteorder 1.3.4", "quick-error", ] @@ -1260,49 +1224,36 @@ dependencies = [ "ed25519", "rand 0.7.3", "serde", - "sha2 0.8.1", + "sha2 0.8.2", "zeroize", ] [[package]] name = "either" -version = "1.5.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" +checksum = "cd56b59865bce947ac5958779cfa508f6c3b9497cc762b7e24a12d11ccde2c4f" [[package]] name = "enumflags2" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a80e524ebf194285b57e5e7944018721c7fffc673253f5183f7accd88a2a3b0c" +checksum = "83c8d82922337cd23a15f88b70d8e4ef5f11da38dd7cdb55e84dd5de99695da0" dependencies = [ "enumflags2_derive", ] [[package]] name = "enumflags2_derive" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ed9afacaea0301eefb738c9deea725e6d53938004597cdc518a8cf9a7aa2f03" +checksum = "946ee94e3dbf58fdd324f9ce245c7b238d46a66f00e86a020b71996349e46cce" dependencies = [ "proc-macro2", "quote", "syn", ] -[[package]] -name = "env_logger" -version = "0.6.2" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aafcde04e90a5226a6443b7aabdb016ba2f8307c847d524724bd9b346dd1a2d3" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - [[package]] name = "env_logger" version = "0.7.1" @@ -1324,22 +1275,22 @@ checksum = "516aa8d7a71cb00a1c4146f0798549b93d083d4f189b3ced8f3de6b8f11ee6c4" [[package]] name = "erased-serde" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d88b6d1705e16a4d62e05ea61cc0496c2bd190f4fa8e5c1f11ce747be6bcf3d1" +checksum = "6ca8b296792113e1500fd935ae487be6e00ce318952a6880555554824d6ebf38" dependencies = [ "serde", ] [[package]] name = "errno" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b480f641ccf0faf324e20c1d3e53d81b7484c698b42ea677f6907ae4db195371" +checksum = "6eab5ee3df98a279d9b316b1af6ac95422127b1290317e6d18c1743c99418b01" dependencies = [ "errno-dragonfly", "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1361,8 +1312,8 @@ dependencies = [ "crunchy", "fixed-hash", "impl-rlp", - "impl-serde 0.3.0", - "tiny-keccak 2.0.2", + "impl-serde", + "tiny-keccak", ] [[package]] @@ -1374,11 +1325,17 @@ dependencies = [ "ethbloom", "fixed-hash", "impl-rlp", - "impl-serde 0.3.0", + "impl-serde", "primitive-types", "uint", ] +[[package]] +name = "event-listener" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "699d84875f1b72b4da017e6b0f77dfa88c0137f089958a88974d15938cbc2976" + [[package]] name = "evm" version = "0.17.0" @@ -1436,9 +1393,9 @@ dependencies = [ [[package]] name = "failure" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8529c2421efa3066a5cbd8063d2244603824daccb6936b079010bb2aa89464b" +checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" dependencies = [ "backtrace", "failure_derive", @@ -1446,9 +1403,9 @@ dependencies = [ [[package]] name = "failure_derive" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030a733c8287d6213886dd487564ff5c8f6aae10278b3588ed177f9d18f8d231" +checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ "proc-macro2", "quote", @@ -1470,26 +1427,26 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b90eb1dec02087df472ab9f0db65f27edaa654a746830042688bcc2eaf68090f" +checksum = "36a9cb09840f81cd211e435d00a4e487edd263dc3c8ff815c32dd76ad668ebed" [[package]] name = "fdlimit" -version = "0.1.4" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0da54a593b34c71b889ee45f5b5bb900c74148c5f7f8c6a9479ee7899f69603c" +checksum = "47bc6e222b8349b2bd0acb85a1d16d22852376b3ceed2a7f09c2692c3d8a78d0" dependencies = [ "libc", ] [[package]] name = "file-per-thread-logger" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8505b75b31ef7285168dd237c4a7db3c1f3e0927e7d314e670bc98e854272fe9" +checksum = "8b3937f028664bd0e13df401ba49a4567ccda587420365823242977f06609ed1" dependencies = [ - "env_logger 0.6.2", + "env_logger", "log", ] @@ -1511,11 +1468,11 @@ dependencies = [ [[package]] name = "fixed-hash" 
-version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32529fc42e86ec06e5047092082aab9ad459b070c5d2a76b14f4f5ce70bf2e84" +checksum = "11498d382790b7a8f2fd211780bec78619bba81cdad3a283997c0c41f836759c" dependencies = [ - "byteorder", + "byteorder 1.3.4", "rand 0.7.3", "rustc-hex", "static_assertions", @@ -1529,36 +1486,37 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" [[package]] name = "flate2" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cfff41391129e0a856d6d822600b8d71179d46879e310417eb9c762eb178b42" +checksum = "68c90b0fc46cf89d227cc78b40e494ff81287a92dd07631e5af0d06fe3cf885e" dependencies = [ "cfg-if", "crc32fast", "libc", "libz-sys", - "miniz_oxide 0.3.6", + "miniz_oxide", ] [[package]] name = "fnv" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "parity-scale-codec", ] [[package]] name = "frame-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", + "hex-literal", "linregress", "parity-scale-codec", "paste", @@ -1567,11 +1525,12 @@ dependencies = [ "sp-runtime", "sp-runtime-interface", "sp-std", + "sp-storage", ] [[package]] name = "frame-benchmarking-cli" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "parity-scale-codec", @@ -1588,7 +1547,7 @@ dependencies = [ [[package]] name = "frame-executive" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -1608,7 +1567,7 @@ dependencies = [ [[package]] name = "frame-metadata" -version = "11.0.0-rc5" +version = "12.0.0" dependencies = [ "parity-scale-codec", "serde", @@ -1618,7 +1577,7 @@ dependencies = [ [[package]] name = "frame-support" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "bitmask", "frame-metadata", @@ -1626,9 +1585,9 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "log", - "once_cell", + "once_cell 1.4.0", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "paste", "pretty_assertions", "serde", @@ -1645,7 +1604,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support-procedural-tools", "proc-macro2", @@ -1655,7 +1614,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -1666,7 +1625,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "proc-macro2", "quote", @@ -1675,8 +1634,9 @@ dependencies = [ [[package]] name = "frame-support-test" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ + "frame-metadata", "frame-support", "parity-scale-codec", "pretty_assertions", @@ -1693,9 +1653,9 @@ dependencies = [ [[package]] name = "frame-system" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ - "criterion 0.2.11", + "criterion", "frame-support", "impl-trait-for-tuples", "parity-scale-codec", @@ -1711,7 +1671,7 @@ dependencies = [ [[package]] name = 
"frame-system-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -1726,7 +1686,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "parity-scale-codec", "sp-api", @@ -1741,7 +1701,7 @@ dependencies = [ "lazy_static", "libc", "libloading", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1751,7 +1711,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1878,6 +1838,21 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" +[[package]] +name = "futures-lite" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180d8fc9819eb48a0c976672fbeea13a73e10999e812bdc9e14644c25ad51d60" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + [[package]] name = "futures-macro" version = "0.3.5" @@ -1902,7 +1877,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" dependencies = [ - "once_cell", + "once_cell 1.4.0", ] [[package]] @@ -1960,7 +1935,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce54d63f8b0c75023ed920d46fd71d0cbbb830b0ee012726b5b4f506fb6dea5b" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "futures 0.3.5", "memchr", "pin-project", @@ -2051,12 +2026,6 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" -[[package]] -name = "glob" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" - [[package]] name = "glob" version = "0.3.0" @@ -2095,7 +2064,7 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" dependencies = [ - "byteorder", + "byteorder 1.3.4", "bytes 0.4.12", "fnv", "futures 0.1.29", @@ -2109,23 +2078,29 @@ dependencies = [ [[package]] name = "h2" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "377038bf3c89d18d6ca1431e7a5027194fbd724ca10592b9487ede5e8e144f42" +checksum = "993f9e0baeed60001cf565546b0d3dbe6a6ad23f2bd31644a133c641eccf6d53" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "fnv", "futures-core", "futures-sink", "futures-util", "http 0.2.1", "indexmap", - "log", "slab", - "tokio 0.2.21", + "tokio 0.2.22", "tokio-util", + "tracing", ] +[[package]] +name = "half" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d36fab90f82edc3c747f9d438e06cf0a491055896f2a279638bb5beed6c40177" + [[package]] name = "hash-db" version = "0.15.2" @@ -2141,6 +2116,16 @@ dependencies = [ "crunchy", ] +[[package]] +name = "hashbrown" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bae29b6653b3412c2e71e9d486db9f9df5d701941d86683005efb9f2d28e3da" +dependencies = [ + "byteorder 1.3.4", + "scopeguard 
0.3.3", +] + [[package]] name = "hashbrown" version = "0.6.3" @@ -2153,9 +2138,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab9b7860757ce258c89fd48d28b68c41713e597a7b09e793f6c6a6e2ea37c827" +checksum = "34f595585f103464d8d2f6e9864682d74c1601fed5e07d62b1c9058dba8246fb" dependencies = [ "ahash 0.3.8", "autocfg 1.0.0", @@ -2172,9 +2157,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.10" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e" +checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" dependencies = [ "libc", ] @@ -2187,22 +2172,15 @@ checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" [[package]] name = "hex-literal" -version = "0.2.1" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "961de220ec9a91af2e1e5bd80d02109155695e516771762381ef8581317066e0" -dependencies = [ - "hex-literal-impl", - "proc-macro-hack", -] +checksum = "5af1f635ef1bc545d78392b136bfe1c9809e029023c84a3638a864a10b8819c8" [[package]] -name = "hex-literal-impl" -version = "0.2.1" +name = "hex_fmt" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d4c5c844e2fee0bf673d54c2c177f1713b3d2af2ff6e666b49cb7572e6cf42d" -dependencies = [ - "proc-macro-hack", -] +checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] name = "hmac" @@ -2253,7 +2231,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "fnv", "itoa", ] @@ -2276,7 +2254,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "http 0.2.1", ] @@ -2327,25 +2305,25 @@ dependencies = [ [[package]] name = "hyper" -version = "0.13.4" +version = "0.13.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6081100e960d9d74734659ffc9cc91daf1c0fc7aceb8eaa94ee1a3f5046f2e" +checksum = "3e68a8dd9716185d9e64ea473ea6ef63529252e3e27623295a0378a19665d5eb" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "futures-channel", "futures-core", "futures-util", - "h2 0.2.4", + "h2 0.2.6", "http 0.2.1", "http-body 0.3.1", "httparse", "itoa", - "log", - "net2", "pin-project", + "socket2", "time", - "tokio 0.2.21", + "tokio 0.2.22", "tower-service", + "tracing", "want 0.3.0", ] @@ -2355,14 +2333,14 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37743cc83e8ee85eacfce90f2f4102030d9ff0a95244098d781e9bee4a90abb6" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "ct-logs", "futures-util", - "hyper 0.13.4", + "hyper 0.13.7", "log", "rustls", "rustls-native-certs", - "tokio 0.2.21", + "tokio 0.2.22", "tokio-rustls", "webpki", ] @@ -2409,18 +2387,9 @@ dependencies = [ [[package]] name = "impl-serde" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e3cae7e99c7ff5a995da2cf78dd0a5383740eda71d98cf7b1910c301ac69b8" -dependencies = [ - "serde", -] - -[[package]] -name = "impl-serde" -version = "0.3.0" +version = "0.3.1" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bbe9ea9b182f0fb1cabbd61f4ff9b7b7b9197955e95a7e4c27de5055eb29ff8" +checksum = "b47ca4d2b6931707a55fce5cf66aff80e2178c8b63bbb4ecb5695cbc870ddf6f" dependencies = [ "serde", ] @@ -2438,14 +2407,21 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.3.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" +checksum = "5b88cd59ee5f71fea89a62248fc8f387d44400cefe05ef548466d61ced9029a7" dependencies = [ "autocfg 1.0.0", + "hashbrown 0.8.1", "serde", ] +[[package]] +name = "instant" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b141fdc7836c525d4d594027d318c84161ca17aaf8113ab1f81ab93ae897485" + [[package]] name = "integer-sqrt" version = "0.1.3" @@ -2503,9 +2479,9 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" +checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" [[package]] name = "jobserver" @@ -2518,18 +2494,18 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.37" +version = "0.3.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a27d435371a2fa5b6d2b028a74bbdb1234f308da363226a2854ca3ff8ba7055" +checksum = "fa5a448de267e7358beaf4a5d849518fe9a0c13fce7afd44b06e68550e5562a7" dependencies = [ "wasm-bindgen", ] [[package]] name = "jsonrpc-client-transports" -version = "14.2.0" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecbdaacc17243168d9d1fa6b2bd7556a27e1e60a621d8a2a6e590ae2b145d158" +checksum = "c6f7b1cdf66312002e15682a24430728bd13036c641163c016bc53fb686a7c2d" dependencies = [ "failure", "futures 0.1.29", @@ -2544,9 +2520,9 @@ dependencies = [ [[package]] name = "jsonrpc-core" -version = "14.2.0" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0747307121ffb9703afd93afbd0fb4f854c38fb873f2c8b90e0e902f27c7b62" +checksum = "f30b12567a31d48588a65b6cf870081e6ba1d7b2ae353977cb9820d512e69c70" dependencies = [ "futures 0.1.29", "log", @@ -2557,18 +2533,18 @@ dependencies = [ [[package]] name = "jsonrpc-core-client" -version = "14.2.0" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34221123bc79b66279a3fde2d3363553835b43092d629b34f2e760c44dc94713" +checksum = "d175ca0cf77439b5495612bf216c650807d252d665b4b70ab2eebd895a88fac1" dependencies = [ "jsonrpc-client-transports", ] [[package]] name = "jsonrpc-derive" -version = "14.2.1" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fadf6945e227246825a583514534d864554e9f23d80b3c77d034b10983db5ef" +checksum = "c2cc6ea7f785232d9ca8786a44e9fa698f92149dcdc1acc4aa1fc69c4993d79e" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2578,9 +2554,9 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" -version = "14.2.0" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0da906d682799df05754480dac1b9e70ec92e12c19ebafd2662a5ea1c9fd6522" +checksum = "9996b26c0c7a59626d0ed6c5ec8bf06218e62ce1474bd2849f9b9fd38a0158c0" dependencies = [ "hyper 0.12.35", "jsonrpc-core", @@ -2593,9 +2569,9 @@ dependencies = [ [[package]] name = 
"jsonrpc-ipc-server" -version = "14.2.0" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dedccd693325d833963b549e959137f30a7a0ea650cde92feda81dc0c1393cb5" +checksum = "b8e8f2278fb2b277175b6e21b23e7ecf30e78daff5ee301d0a2a411d9a821a0a" dependencies = [ "jsonrpc-core", "jsonrpc-server-utils", @@ -2607,9 +2583,9 @@ dependencies = [ [[package]] name = "jsonrpc-pubsub" -version = "14.2.0" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d44f5602a11d657946aac09357956d2841299ed422035edf140c552cb057986" +checksum = "f389c5cd1f3db258a99296892c21047e21ae73ff4c0e2d39650ea86fe994b4c7" dependencies = [ "jsonrpc-core", "log", @@ -2620,9 +2596,9 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" -version = "14.2.0" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56cbfb462e7f902e21121d9f0d1c2b77b2c5b642e1a4e8f4ebfa2e15b94402bb" +checksum = "c623e1895d0d9110cb0ea7736cfff13191ff52335ad33b21bd5c775ea98b27af" dependencies = [ "bytes 0.4.12", "globset", @@ -2636,16 +2612,16 @@ dependencies = [ [[package]] name = "jsonrpc-ws-server" -version = "14.2.0" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903d3109fe7c4acb932b567e1e607e0f524ed04741b09fb0e61841bc40a022fc" +checksum = "436a92034d0137ab3e3c64a7a6350b428f31cb4d7d1a89f284bcdbcd98a7bc56" dependencies = [ "jsonrpc-core", "jsonrpc-server-utils", "log", + "parity-ws", "parking_lot 0.10.2", "slab", - "ws", ] [[package]] @@ -2656,13 +2632,13 @@ checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" [[package]] name = "keccak-hasher" -version = "0.15.2" +version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3468207deea1359a0e921591ae9b4c928733d94eb9d6a2eeda994cfd59f42cf8" +checksum = "711adba9940a039f4374fc5724c0a5eaca84a2d558cce62256bfe26f0dbef05e" dependencies = [ "hash-db", "hash256-std-hasher", - "tiny-keccak 1.5.0", + "tiny-keccak", ] [[package]] @@ -2677,67 +2653,46 @@ dependencies = [ [[package]] name = "kv-log-macro" -version = "1.0.4" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c54d9f465d530a752e6ebdc217e081a7a614b48cb200f6f0aee21ba6bc9aabb" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" dependencies = [ "log", ] -[[package]] -name = "kvdb" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e763b2a9b500ba47948061d1e8bc3b5f03a8a1f067dbcf822a4d2c84d2b54a3a" -dependencies = [ - "parity-util-mem 0.6.0", - "smallvec 1.4.1", -] - [[package]] name = "kvdb" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0315ef2f688e33844400b31f11c263f2b3dc21d8b9355c6891c5f185fae43f9a" dependencies = [ - "parity-util-mem 0.7.0", + "parity-util-mem", "smallvec 1.4.1", ] -[[package]] -name = "kvdb-memorydb" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73027d5e228de6f503b5b7335d530404fc26230a6ae3e09b33ec6e45408509a4" -dependencies = [ - "kvdb 0.6.0", - "parity-util-mem 0.6.0", - "parking_lot 0.10.2", -] - [[package]] name = "kvdb-memorydb" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73de822b260a3bdfb889dbbb65bb2d473eee2253973d6fa4a5d149a2a4a7c66e" dependencies = [ - "kvdb 0.7.0", - "parity-util-mem 0.7.0", + "kvdb", + 
"parity-util-mem", "parking_lot 0.10.2", ] [[package]] name = "kvdb-rocksdb" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c341ef15cfb1f923fa3b5138bfbd2d4813a2c1640b473727a53351c7f0b0fa2" +checksum = "44947dd392f09475af614d740fe0320b66d01cb5b977f664bbbb5e45a70ea4c1" dependencies = [ "fs-swap", - "kvdb 0.7.0", + "kvdb", "log", "num_cpus", "owning_ref", - "parity-util-mem 0.7.0", + "parity-util-mem", "parking_lot 0.10.2", "regex", "rocksdb", @@ -2752,10 +2707,10 @@ checksum = "2701a1369d6ea4f1b9f606db46e5e2a4a8e47f22530a07823d653f85ab1f6c34" dependencies = [ "futures 0.3.5", "js-sys", - "kvdb 0.7.0", - "kvdb-memorydb 0.7.0", + "kvdb", + "kvdb-memorydb", "log", - "parity-util-mem 0.7.0", + "parity-util-mem", "send_wrapper 0.3.0", "wasm-bindgen", "web-sys", @@ -2781,9 +2736,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.71" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49" +checksum = "bd7d4bd64732af4bf3a67f367c27df8520ad7e230c5817b8ff485864d80242b9" [[package]] name = "libloading" @@ -2792,7 +2747,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" dependencies = [ "cc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2803,26 +2758,32 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.23.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1ebb6c031584a5af181fe3a1e4b074af5d0b1a3b31663200f0251f4bcff6b5c" +checksum = "571f5a4604c1a40d75651da141dfde29ad15329f537a779528803297d2220274" dependencies = [ "atomic", - "bytes 0.5.4", + "bytes 0.5.6", "futures 0.3.5", "lazy_static", "libp2p-core", "libp2p-core-derive", + "libp2p-deflate", "libp2p-dns", + "libp2p-floodsub", + "libp2p-gossipsub", "libp2p-identify", "libp2p-kad", "libp2p-mdns", "libp2p-mplex", "libp2p-noise", "libp2p-ping", - "libp2p-secio", + "libp2p-plaintext", + "libp2p-pnet", + "libp2p-request-response", "libp2p-swarm", "libp2p-tcp", + "libp2p-uds", "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", @@ -2836,9 +2797,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.20.1" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a694fd76d7c33a45a0e6e1525e9b9b5d11127c9c94e560ac0f8abba54ed80af" +checksum = "52f13ba8c7df0768af2eb391696d562c7de88cc3a35122531aaa6a7d77754d25" dependencies = [ "asn1_der", "bs58", @@ -2860,7 +2821,7 @@ dependencies = [ "rand 0.7.3", "ring", "rw-stream-sink", - "sha2 0.8.1", + "sha2 0.8.2", "smallvec 1.4.1", "thiserror", "unsigned-varint 0.4.0", @@ -2870,30 +2831,84 @@ dependencies = [ [[package]] name = "libp2p-core-derive" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "515c4a7cba5d321bb88ed3ed803997bdd5634ce35c9c5e8e9ace9c512e57eceb" +checksum = "f753d9324cd3ec14bf04b8a8cd0d269c87f294153d6bf2a84497a63a5ad22213" dependencies = [ "quote", "syn", ] +[[package]] +name = "libp2p-deflate" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74029ae187f35f4b8ddf26b9779a68b340045d708528a103917cdca49a296db5" +dependencies = [ + "flate2", + "futures 0.3.5", + 
"libp2p-core", +] + [[package]] name = "libp2p-dns" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f751924b6b98e350005e0b87a822beb246792a3fb878c684e088f866158120ac" +checksum = "7cf319822e08dd65c8e060d2354e9f952895bbc433f5706c75ed010c152aee5e" dependencies = [ "futures 0.3.5", "libp2p-core", "log", ] +[[package]] +name = "libp2p-floodsub" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a9acb43a3e4a4e413e0c4abe0fa49308df7c6335c88534757b647199cb8a51" +dependencies = [ + "cuckoofilter", + "fnv", + "futures 0.3.5", + "libp2p-core", + "libp2p-swarm", + "prost", + "prost-build", + "rand 0.7.3", + "smallvec 1.4.1", +] + +[[package]] +name = "libp2p-gossipsub" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab20fcb60edebe3173bbb708c6ac3444afdf1e3152dc2866b10c4f5497f17467" +dependencies = [ + "base64 0.11.0", + "byteorder 1.3.4", + "bytes 0.5.6", + "fnv", + "futures 0.3.5", + "futures_codec", + "hex_fmt", + "libp2p-core", + "libp2p-swarm", + "log", + "lru_time_cache", + "prost", + "prost-build", + "rand 0.7.3", + "sha2 0.8.2", + "smallvec 1.4.1", + "unsigned-varint 0.4.0", + "wasm-timer", +] + [[package]] name = "libp2p-identify" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "912c00a7bf67e0e765daf0cc37e08f675ea26aba3d6d1fbfaee81f19a4c23049" +checksum = "56396ee63aa9164eacf40c2c5d2bda8c4133c2f57e1b0425d51d3a4e362583b1" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -2907,12 +2922,12 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.21.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44ed3a4c8111c570ab2bffb30c6353178d7603ce3787e3c5f2493c8d3d16d1f0" +checksum = "cc7fa9047f8b8f544278a35c2d9d45d3b2c1785f2d86d4e1629d6edf97be3955" dependencies = [ "arrayvec 0.5.1", - "bytes 0.5.4", + "bytes 0.5.6", "either", "fnv", "futures 0.3.5", @@ -2924,7 +2939,7 @@ dependencies = [ "prost", "prost-build", "rand 0.7.3", - "sha2 0.8.1", + "sha2 0.8.2", "smallvec 1.4.1", "uint", "unsigned-varint 0.4.0", @@ -2934,9 +2949,9 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd004c668160fd922f7268b2cd1e4550ff69165d9c744e9eb5770086eb753d02" +checksum = "3173b5a6b2f690c29ae07798d85b9441a131ac76ddae9015ef22905b623d0c69" dependencies = [ "async-std", "data-encoding", @@ -2956,11 +2971,11 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14ae0ffacd30f073f96cd518b2c9cd2cb18ac27c3d136a4b23cf1af99f33e541" +checksum = "8a73a799cc8410b36e40b8f4c4b6babbcb9efd3727111bf517876e4acfa612d3" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "fnv", "futures 0.3.5", "futures_codec", @@ -2972,11 +2987,11 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e594f2de0c23c2b7ad14802c991a2e68e95315c6a6c7715e53801506f20135d" +checksum = "6ef6c490042f549fb1025f2892dfe6083d97a77558f450c1feebe748ca9eb15a" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "curve25519-dalek", "futures 0.3.5", "lazy_static", @@ -2985,7 +3000,7 @@ dependencies = [ "prost", "prost-build", "rand 0.7.3", 
- "sha2 0.8.1", + "sha2 0.8.2", "snow", "static_assertions", "x25519-dalek", @@ -2994,9 +3009,9 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70130cf130e4ba6dc177366e72dd9f86f9e3588fa1a0c4145247e676f16affad" +checksum = "ad063c21dfcea4518ac9e8bd4119d33a5b26c41e674f602f41f05617a368a5c8" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -3008,41 +3023,64 @@ dependencies = [ ] [[package]] -name = "libp2p-secio" -version = "0.20.0" +name = "libp2p-plaintext" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ff43513c383f7cdab2736eb98465fc4c5dd5d1988df89749dc8a68950349d56" +checksum = "903a12e99c72dbebefea258de887982adeacc7025baa1ceb10b7fa9928f54791" dependencies = [ - "aes-ctr", - "ctr", + "bytes 0.5.6", "futures 0.3.5", - "hmac", - "js-sys", - "lazy_static", + "futures_codec", "libp2p-core", "log", - "parity-send-wrapper", - "pin-project", "prost", "prost-build", - "quicksink", - "rand 0.7.3", - "ring", "rw-stream-sink", - "sha2 0.8.1", - "static_assertions", - "twofish", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", + "unsigned-varint 0.4.0", + "void", +] + +[[package]] +name = "libp2p-pnet" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37d0db10e139d22d7af0b23ed7949449ec86262798aa0fd01595abdbcb02dc87" +dependencies = [ + "futures 0.3.5", + "log", + "pin-project", + "rand 0.7.3", + "salsa20", + "sha3", +] + +[[package]] +name = "libp2p-request-response" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c0c9e8a4cd69d97e9646c54313d007512f411aba8c5226cfcda16df6a6e84a3" +dependencies = [ + "async-trait", + "bytes 0.5.6", + "futures 0.3.5", + "libp2p-core", + "libp2p-swarm", + "log", + "lru 0.6.0", + "minicbor", + "rand 0.7.3", + "smallvec 1.4.1", + "unsigned-varint 0.5.1", + "wasm-timer", ] [[package]] name = "libp2p-swarm" -version = "0.20.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f88d5e2a090a2aadf042cd33484e2f015c6dab212567406a59deece5dedbd133" +checksum = "7193e444210132237b81b755ec7fe53f1c4bd2f53cf719729b94c0c72eb6eaa1" dependencies = [ + "either", "futures 0.3.5", "libp2p-core", "log", @@ -3054,9 +3092,9 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b1fa2bbad054020cb875546a577a66a65a5bf42eff55ed5265f92ffee3cc052" +checksum = "44f42ec130d7a37a7e47bf4398026b7ad9185c08ed26972e2720f8b94112796f" dependencies = [ "async-std", "futures 0.3.5", @@ -3068,11 +3106,23 @@ dependencies = [ "socket2", ] +[[package]] +name = "libp2p-uds" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dea7acb0a034f70d7db94c300eba3f65c0f6298820105624088a9609c9974d77" +dependencies = [ + "async-std", + "futures 0.3.5", + "libp2p-core", + "log", +] + [[package]] name = "libp2p-wasm-ext" -version = "0.20.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0feb99e32fea20ffb1bbf56a6fb2614bff7325ff44a515728385170b3420d2c3" +checksum = "34c1faac6f92c21fbe155417957863ea822fba9e9fd5eb24c0912336a100e63f" dependencies = [ "futures 0.3.5", "js-sys", @@ -3084,9 +3134,9 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.21.1" +version = "0.23.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "046a5201f6e471f22b22b394e4d084269ed1e28cf7300f7b49874385db84c7bd" +checksum = "d650534ebd99f48f6fa292ed5db10d30df2444943afde4407ceeddab8e513fca" dependencies = [ "async-tls", "either", @@ -3104,26 +3154,26 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.20.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46ae9bf2f7d8a4be9c7e9b61df9de9dc1bd66419d669098f22f81f8d9571029a" +checksum = "781d9b9f043dcdabc40640807125368596b849fd4d96cdca2dcf052fdf6f33fd" dependencies = [ "futures 0.3.5", "libp2p-core", - "parking_lot 0.10.2", + "parking_lot 0.11.0", "thiserror", "yamux", ] [[package]] name = "librocksdb-sys" -version = "6.7.4" +version = "6.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "883213ae3d09bfc3d104aefe94b25ebb183b6f4d3a515b23b14817e1f4854005" +checksum = "eb5b56f651c204634b936be2f92dbb42c36867e00ff7fe2405591f3b9fa66f09" dependencies = [ "bindgen", "cc", - "glob 0.3.0", + "glob", "libc", ] @@ -3138,8 +3188,8 @@ dependencies = [ "digest 0.8.1", "hmac-drbg", "rand 0.7.3", - "sha2 0.8.1", - "subtle 2.2.2", + "sha2 0.8.2", + "subtle 2.2.3", "typenum", ] @@ -3157,15 +3207,15 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae91b68aebc4ddb91978b11a1b02ddd8602a05ec19002801c5666000e05e0f83" +checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" [[package]] name = "linked_hash_set" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c7c91c4c7bbeb4f2f7c4e5be11e6a05bd6830bc37249c47ce1ad86ad453ff9c" +checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" dependencies = [ "linked-hash-map", ] @@ -3183,27 +3233,54 @@ dependencies = [ [[package]] name = "lite-json" -version = "0.1.0" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c73e713a23ac6e12074c9e96ef2dfb770921e0cb9244c093bd38424209e0e523" +dependencies = [ + "lite-parser", +] + +[[package]] +name = "lite-parser" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faa835713bb12ba5204013497da16caf2dd2eee25ca829d0efaa054fb38c4ddd" +checksum = "0c50092e40e0ccd1bf2015a10333fde0502ff95b832b0895dc1ca0d7ac6c52f6" dependencies = [ "paste", ] +[[package]] +name = "lock_api" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c" +dependencies = [ + "scopeguard 0.3.3", +] + [[package]] name = "lock_api" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" dependencies = [ - "scopeguard", + "scopeguard 1.1.0", +] + +[[package]] +name = "lock_api" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28247cc5a5be2f05fbcd76dd0cf2c7d3b5400cb978a28042abcd4fa0b3f8261c" +dependencies = [ + "scopeguard 1.1.0", ] [[package]] name = "log" -version = "0.4.8" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" dependencies = [ 
"cfg-if", ] @@ -3226,6 +3303,21 @@ dependencies = [ "hashbrown 0.6.3", ] +[[package]] +name = "lru" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "111b945ac72ec09eb7bc62a0fbdc3cc6e80555a7245f52a69d3921a75b53b153" +dependencies = [ + "hashbrown 0.8.1", +] + +[[package]] +name = "lru_time_cache" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb241df5c4caeb888755363fc95f8a896618dc0d435e9e775f7930cb099beab" + [[package]] name = "mach" version = "0.3.2" @@ -3278,14 +3370,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "memoffset" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4fc2c02a7e374099d4ee95a193111f72d2110197fe200272371758f6c3643d8" +checksum = "c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f" dependencies = [ "autocfg 1.0.0", ] @@ -3297,8 +3389,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36f36ddb0b2cdc25d38babba472108798e3477f02be5165f038c5e393e50c57a" dependencies = [ "hash-db", - "hashbrown 0.8.0", - "parity-util-mem 0.7.0", + "hashbrown 0.8.1", + "parity-util-mem", ] [[package]] @@ -3313,19 +3405,30 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6feca46f4fa3443a01769d768727f10c10a20fdb65e52dc16a81f0c8269bb78" dependencies = [ - "byteorder", + "byteorder 1.3.4", "keccak", "rand_core 0.5.1", "zeroize", ] [[package]] -name = "miniz_oxide" -version = "0.3.6" +name = "minicbor" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa679ff6578b1cddee93d7e82e263b94a575e0bfced07284eb0c037c1d2416a5" +checksum = "2fc03ad6f8f548db7194a5ff5a6f96342ecae4e3ef67d2bf18bacc0e245cd041" dependencies = [ - "adler32", + "minicbor-derive", +] + +[[package]] +name = "minicbor-derive" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c214bf3d90099b52f3e4b328ae0fe34837fd0fab683ad1e10fceb4629106df48" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -3339,9 +3442,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" +checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" dependencies = [ "cfg-if", "fuchsia-zircon", @@ -3370,21 +3473,21 @@ dependencies = [ [[package]] name = "mio-named-pipes" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5e374eff525ce1c5b7687c4cef63943e7686524a387933ad27ca7ec43779cb3" +checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" dependencies = [ "log", "mio", "miow 0.3.5", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "mio-uds" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" +checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" dependencies = [ "iovec", "libc", @@ -3410,7 +3513,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"07b88fb9795d4d36d62a012dfbf49a8f5cf12751f36d31a9dbe66d528e58979e" dependencies = [ "socket2", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3421,15 +3524,15 @@ checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238" [[package]] name = "multihash" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae32179a9904ccc6e063de8beee7f5dd55fae85ecb851ca923d55722bc28cf5d" +checksum = "f75db05d738947aa5389863aadafbcf2e509d7ba099dc2ddcdf4fc66bf7a9e03" dependencies = [ "blake2b_simd", "blake2s_simd", "digest 0.8.1", "sha-1", - "sha2 0.8.1", + "sha2 0.8.2", "sha3", "unsigned-varint 0.3.3", ] @@ -3446,7 +3549,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9157e87afbc2ef0d84cc0345423d715f445edde00141c93721c162de35a05e5" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "futures 0.3.5", "log", "pin-project", @@ -3482,13 +3585,13 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.33" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" +checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" dependencies = [ "cfg-if", "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3506,14 +3609,14 @@ dependencies = [ [[package]] name = "node-bench" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "derive_more", "fs_extra", "futures 0.3.5", "hash-db", "hex", - "kvdb 0.7.0", + "kvdb", "kvdb-rocksdb", "lazy_static", "log", @@ -3521,7 +3624,7 @@ dependencies = [ "node-runtime", "node-testing", "parity-db", - "parity-util-mem 0.7.0", + "parity-util-mem", "rand 0.7.3", "sc-basic-authorship", "sc-cli", @@ -3536,6 +3639,7 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-timestamp", + "sp-tracing", "sp-transaction-pool", "sp-trie", "structopt", @@ -3544,7 +3648,7 @@ dependencies = [ [[package]] name = "node-browser-testing" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", @@ -3561,7 +3665,7 @@ dependencies = [ [[package]] name = "node-cli" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "assert_cmd", "frame-benchmarking-cli", @@ -3569,8 +3673,6 @@ dependencies = [ "frame-system", "futures 0.3.5", "hex-literal", - "jsonrpc-core", - "jsonrpc-pubsub", "log", "nix", "node-executor", @@ -3629,6 +3731,7 @@ dependencies = [ "structopt", "substrate-browser-utils", "substrate-build-script-utils", + "substrate-frame-cli", "tempfile", "tracing", "wasm-bindgen", @@ -3637,9 +3740,9 @@ dependencies = [ [[package]] name = "node-executor" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ - "criterion 0.3.1", + "criterion", "frame-benchmarking", "frame-support", "frame-system", @@ -3666,12 +3769,12 @@ dependencies = [ "sp-trie", "substrate-test-client", "trie-root", - "wabt", + "wat", ] [[package]] name = "node-inspect" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "derive_more", "log", @@ -3687,7 +3790,7 @@ dependencies = [ [[package]] name = "node-primitives" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-system", "parity-scale-codec", @@ -3700,10 +3803,9 @@ dependencies = [ [[package]] name = "node-rpc" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "jsonrpc-core", - "jsonrpc-pubsub", "node-primitives", "node-runtime", "pallet-contracts-rpc", @@ -3722,26 +3824,27 @@ dependencies = [ "sp-blockchain", 
"sp-consensus", "sp-consensus-babe", + "sp-runtime", "sp-transaction-pool", "substrate-frame-rpc-system", ] [[package]] name = "node-rpc-client" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ - "env_logger 0.7.1", "futures 0.1.29", "hyper 0.12.35", "jsonrpc-core-client", "log", "node-primitives", "sc-rpc", + "sp-tracing", ] [[package]] name = "node-runtime" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-executive", @@ -3810,8 +3913,10 @@ dependencies = [ [[package]] name = "node-template" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ + "frame-benchmarking", + "frame-benchmarking-cli", "jsonrpc-core", "node-template-runtime", "pallet-transaction-payment-rpc", @@ -3843,12 +3948,15 @@ dependencies = [ [[package]] name = "node-template-runtime" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ + "frame-benchmarking", "frame-executive", "frame-support", "frame-system", + "frame-system-benchmarking", "frame-system-rpc-runtime-api", + "hex-literal", "pallet-aura", "pallet-balances", "pallet-grandpa", @@ -3876,9 +3984,9 @@ dependencies = [ [[package]] name = "node-testing" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ - "criterion 0.3.1", + "criterion", "frame-support", "frame-system", "fs_extra", @@ -3917,7 +4025,6 @@ dependencies = [ "sp-timestamp", "substrate-test-client", "tempfile", - "wabt", ] [[package]] @@ -3934,9 +4041,9 @@ checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" [[package]] name = "nom" -version = "5.1.1" +version = "5.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b471253da97532da4b61552249c521e01e736071f71c1a4f7ebbfbf0a06aad6" +checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" dependencies = [ "memchr", "version_check", @@ -3965,9 +4072,9 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.42" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" +checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" dependencies = [ "autocfg 1.0.0", "num-traits", @@ -3987,9 +4094,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" +checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" dependencies = [ "autocfg 1.0.0", "libm", @@ -3997,9 +4104,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" dependencies = [ "hermit-abi", "libc", @@ -4024,18 +4131,27 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.3.1" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b" +checksum = "532c29a261168a45ce28948f9537ddd7a5dd272cc513b3017b1e82a88f962c37" dependencies = [ - "parking_lot 0.9.0", + "parking_lot 0.7.1", +] + +[[package]] +name = "once_cell" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" +dependencies = [ + "parking_lot 0.10.2", ] [[package]] name = "oorandom" -version = "11.1.0" +version = "11.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebcec7c9c2a95cacc7cd0ecb89d8a8454eca13906f6deb55258ffff0adeb9405" +checksum = "a170cebd8021a008ea92e4db85a72f80b35df514ec664b296fdcbb654eac0b2c" [[package]] name = "opaque-debug" @@ -4061,7 +4177,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -4075,7 +4191,7 @@ dependencies = [ [[package]] name = "pallet-assets" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4089,7 +4205,7 @@ dependencies = [ [[package]] name = "pallet-atomic-swap" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4104,7 +4220,7 @@ dependencies = [ [[package]] name = "pallet-aura" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4126,7 +4242,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4144,7 +4260,7 @@ dependencies = [ [[package]] name = "pallet-authorship" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4160,7 +4276,7 @@ dependencies = [ [[package]] name = "pallet-babe" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4189,7 +4305,7 @@ dependencies = [ [[package]] name = "pallet-balances" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4203,23 +4319,9 @@ dependencies = [ "sp-std", ] -[[package]] -name = "pallet-benchmark" -version = "2.0.0-rc5" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "parity-scale-codec", - "serde", - "sp-io", - "sp-runtime", - "sp-std", -] - [[package]] name = "pallet-collective" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4236,7 +4338,7 @@ dependencies = [ [[package]] name = "pallet-contracts" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "assert_matches", "bitflags", @@ -4264,7 +4366,7 @@ dependencies = [ [[package]] name = "pallet-contracts-primitives" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -4273,7 +4375,7 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -4292,7 +4394,7 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc-runtime-api" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "pallet-contracts-primitives", "parity-scale-codec", @@ -4303,7 +4405,7 @@ dependencies = [ [[package]] name = "pallet-democracy" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4323,7 +4425,7 @@ dependencies = [ [[package]] name = "pallet-elections" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4339,7 +4441,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-support", 
@@ -4358,7 +4460,7 @@ dependencies = [ [[package]] name = "pallet-evm" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "evm", "frame-support", @@ -4380,7 +4482,7 @@ dependencies = [ [[package]] name = "pallet-example" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4396,7 +4498,7 @@ dependencies = [ [[package]] name = "pallet-example-offchain-worker" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4411,7 +4513,7 @@ dependencies = [ [[package]] name = "pallet-finality-tracker" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4426,23 +4528,9 @@ dependencies = [ "sp-std", ] -[[package]] -name = "pallet-generic-asset" -version = "2.0.0-rc5" -dependencies = [ - "frame-support", - "frame-system", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - [[package]] name = "pallet-grandpa" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "finality-grandpa", "frame-benchmarking", @@ -4471,7 +4559,7 @@ dependencies = [ [[package]] name = "pallet-identity" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "enumflags2", "frame-benchmarking", @@ -4488,7 +4576,7 @@ dependencies = [ [[package]] name = "pallet-im-online" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4507,27 +4595,43 @@ dependencies = [ [[package]] name = "pallet-indices" -version = "2.0.0-rc5" +version = "2.0.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-keyring", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-membership" +version = "2.0.0" dependencies = [ - "frame-benchmarking", "frame-support", "frame-system", - "pallet-balances", "parity-scale-codec", "serde", "sp-core", "sp-io", - "sp-keyring", "sp-runtime", "sp-std", ] [[package]] -name = "pallet-membership" -version = "2.0.0-rc5" +name = "pallet-multisig" +version = "2.0.0" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", + "pallet-balances", "parity-scale-codec", "serde", "sp-core", @@ -4537,10 +4641,9 @@ dependencies = [ ] [[package]] -name = "pallet-multisig" -version = "2.0.0-rc5" +name = "pallet-nicks" +version = "2.0.0" dependencies = [ - "frame-benchmarking", "frame-support", "frame-system", "pallet-balances", @@ -4553,12 +4656,11 @@ dependencies = [ ] [[package]] -name = "pallet-nicks" -version = "2.0.0-rc5" +name = "pallet-node-authorization" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", - "pallet-balances", "parity-scale-codec", "serde", "sp-core", @@ -4569,7 +4671,7 @@ dependencies = [ [[package]] name = "pallet-offences" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4585,7 +4687,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4610,7 +4712,7 @@ dependencies = [ [[package]] name = "pallet-proxy" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4627,7 +4729,7 @@ dependencies = [ [[package]] name = "pallet-randomness-collective-flip" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4641,7 +4743,7 @@ dependencies = [ [[package]] name = "pallet-recovery" 
-version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "enumflags2", "frame-support", @@ -4657,7 +4759,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4673,7 +4775,7 @@ dependencies = [ [[package]] name = "pallet-scored-pool" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4688,7 +4790,7 @@ dependencies = [ [[package]] name = "pallet-session" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4709,7 +4811,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4731,7 +4833,7 @@ dependencies = [ [[package]] name = "pallet-society" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4747,9 +4849,8 @@ dependencies = [ [[package]] name = "pallet-staking" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ - "env_logger 0.7.1", "frame-benchmarking", "frame-support", "frame-system", @@ -4771,6 +4872,7 @@ dependencies = [ "sp-staking", "sp-std", "sp-storage", + "sp-tracing", "static_assertions", "substrate-test-utils", ] @@ -4798,7 +4900,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -4809,7 +4911,7 @@ dependencies = [ [[package]] name = "pallet-sudo" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4823,7 +4925,7 @@ dependencies = [ [[package]] name = "pallet-template" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4835,7 +4937,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4853,7 +4955,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -4871,7 +4973,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -4888,7 +4990,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "parity-scale-codec", @@ -4901,7 +5003,7 @@ dependencies = [ [[package]] name = "pallet-treasury" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4918,7 +5020,7 @@ dependencies = [ [[package]] name = "pallet-utility" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -4934,7 +5036,7 @@ dependencies = [ [[package]] name = "pallet-vesting" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "enumflags2", "frame-benchmarking", @@ -4967,13 +5069,13 @@ dependencies = [ [[package]] name = "parity-multiaddr" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc20af3143a62c16e7c9e92ea5c6ae49f7d271d97d4d8fe73afc28f0514a3d0f" +checksum = "2165a93382a93de55868dcbfa11e4a8f99676a9164eee6a2b4a9479ad319c257" dependencies = [ "arrayref", "bs58", - "byteorder", + "byteorder 1.3.4", "data-encoding", "multihash", "percent-encoding 2.1.0", @@ 
-5030,20 +5132,7 @@ dependencies = [ "tokio 0.1.22", "tokio-named-pipes", "tokio-uds", - "winapi 0.3.8", -] - -[[package]] -name = "parity-util-mem" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e42755f26e5ea21a6a819d9e63cbd70713e9867a2b767ec2cc65ca7659532c5" -dependencies = [ - "cfg-if", - "impl-trait-for-tuples", - "parity-util-mem-derive", - "parking_lot 0.10.2", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5054,14 +5143,14 @@ checksum = "297ff91fa36aec49ce183484b102f6b75b46776822bd81525bfc4cc9b0dd0f5c" dependencies = [ "cfg-if", "ethereum-types", - "hashbrown 0.8.0", + "hashbrown 0.8.1", "impl-trait-for-tuples", "lru 0.5.3", "parity-util-mem-derive", "parking_lot 0.10.2", "primitive-types", "smallvec 1.4.1", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5081,7 +5170,7 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16ad52817c4d343339b3bc2e26861bd21478eda0b7509acf83505727000512ac" dependencies = [ - "byteorder", + "byteorder 1.3.4", ] [[package]] @@ -5090,11 +5179,39 @@ version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" +[[package]] +name = "parity-ws" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e02a625dd75084c2a7024f07c575b61b782f729d18702dabb3cdbf31911dc61" +dependencies = [ + "byteorder 1.3.4", + "bytes 0.4.12", + "httparse", + "log", + "mio", + "mio-extras", + "rand 0.7.3", + "sha-1", + "slab", + "url 2.1.1", +] + [[package]] name = "parking" -version = "1.0.3" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50d4a6da31f8144a32532fe38fe8fb439a6842e0ec633f0037f0144c14e7f907" + +[[package]] +name = "parking_lot" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4029bc3504a62d92e42f30b9095fdef73b8a0b2a06aa41ce2935143b05a1a06" +checksum = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337" +dependencies = [ + "lock_api 0.1.5", + "parking_lot_core 0.4.0", +] [[package]] name = "parking_lot" @@ -5102,7 +5219,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" dependencies = [ - "lock_api", + "lock_api 0.3.4", "parking_lot_core 0.6.2", "rustc_version", ] @@ -5113,8 +5230,32 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" dependencies = [ - "lock_api", - "parking_lot_core 0.7.1", + "lock_api 0.3.4", + "parking_lot_core 0.7.2", +] + +[[package]] +name = "parking_lot" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4893845fa2ca272e647da5d0e46660a314ead9c2fdd9a883aabc32e481a8733" +dependencies = [ + "instant", + "lock_api 0.4.1", + "parking_lot_core 0.8.0", +] + +[[package]] +name = "parking_lot_core" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94c8c7923936b28d546dfd14d4472eaf34c99b14e1c973a32b3e6d4eb04298c9" +dependencies = [ + "libc", + "rand 0.6.5", + "rustc_version", + "smallvec 0.6.13", + "winapi 0.3.9", ] [[package]] @@ -5124,33 +5265,48 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" dependencies = [ "cfg-if", - "cloudabi", + "cloudabi 0.0.3", "libc", "redox_syscall", "rustc_version", "smallvec 0.6.13", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "parking_lot_core" -version = "0.7.1" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" +dependencies = [ + "cfg-if", + "cloudabi 0.0.3", + "libc", + "redox_syscall", + "smallvec 1.4.1", + "winapi 0.3.9", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e136c1904604defe99ce5fd71a28d473fa60a12255d511aa78a9ddf11237aeb" +checksum = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b" dependencies = [ "cfg-if", - "cloudabi", + "cloudabi 0.1.0", + "instant", "libc", "redox_syscall", "smallvec 1.4.1", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "paste" -version = "0.1.10" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4fb1930692d1b6a9cfabdde3d06ea0a7d186518e2f4d67660d8970e2fa647a" +checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" dependencies = [ "paste-impl", "proc-macro-hack", @@ -5158,14 +5314,11 @@ dependencies = [ [[package]] name = "paste-impl" -version = "0.1.10" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62486e111e571b1e93b710b61e8f493c0013be39629b714cb166bdb06aa5a8a" +checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" dependencies = [ "proc-macro-hack", - "proc-macro2", - "quote", - "syn", ] [[package]] @@ -5174,8 +5327,9 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" dependencies = [ - "byteorder", + "byteorder 1.3.4", "crypto-mac 0.7.0", + "rayon", ] [[package]] @@ -5204,9 +5358,9 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "petgraph" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c127eea4a29ec6c85d153c59dc1213f33ec74cead30fe4730aecc88cc1fd92" +checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" dependencies = [ "fixedbitset", "indexmap", @@ -5234,9 +5388,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae" +checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715" [[package]] name = "pin-utils" @@ -5246,9 +5400,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" +checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" [[package]] name = "platforms" @@ -5258,9 +5412,9 @@ checksum = "feb3b2b1033b8a60b4da6ee470325f887758c95d5320f52f9ce0df055a55940e" [[package]] name = "plotters" -version = "0.2.12" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4e3bb8da247d27ae212529352020f3e5ee16e83c0c258061d27b08ab92675eeb" +checksum = "0d1685fbe7beba33de0330629da9d955ac75bd54f33d7b79f9a895590124f6bb" dependencies = [ "js-sys", "num-traits", @@ -5289,15 +5443,15 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" +checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" [[package]] name = "predicates" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "347a1b6f0b21e636bc9872fb60b83b8e185f6f5516298b8238699f7f9a531030" +checksum = "96bfead12e90dccead362d62bb2c90a5f6fc4584963645bc7f71a735e0b0735a" dependencies = [ "difference", "predicates-core", @@ -5333,31 +5487,31 @@ dependencies = [ [[package]] name = "primitive-types" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5e4b9943a2da369aec5e96f7c10ebc74fcf434d39590d974b0a3460e6f67fbb" +checksum = "c55c21c64d0eaa4d7ed885d959ef2d62d9e488c27c0e02d9aa5ce6c877b7d5f8" dependencies = [ "fixed-hash", "impl-codec", "impl-rlp", - "impl-serde 0.3.0", + "impl-serde", "uint", ] [[package]] name = "proc-macro-crate" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e10d4b51f154c8a7fb96fd6dad097cb74b863943ec010ac94b9fd1be8861fe1e" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" dependencies = [ "toml", ] [[package]] name = "proc-macro-error" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98e9e4b82e0ef281812565ea4751049f1bdcdfccda7d3f459f2e138a40c08678" +checksum = "fc175e9777c3116627248584e8f8b3e2987405cabe1c0adf7d1dd28f09dc7880" dependencies = [ "proc-macro-error-attr", "proc-macro2", @@ -5368,9 +5522,9 @@ dependencies = [ [[package]] name = "proc-macro-error-attr" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5444ead4e9935abd7f27dc51f7e852a0569ac888096d5ec2499470794e2e53" +checksum = "3cc9795ca17eb581285ec44936da7fc2335a3f34f2ddd13118b6f4d515435c50" dependencies = [ "proc-macro2", "quote", @@ -5381,35 +5535,36 @@ dependencies = [ [[package]] name = "proc-macro-hack" -version = "0.5.15" +version = "0.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d659fe7c6d27f25e9d80a1a094c223f5246f6a6596453e09d7229bf42750b63" +checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" [[package]] name = "proc-macro-nested" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694" +checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" [[package]] name = "proc-macro2" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" +checksum = "04f5f085b5d71e2188cb8271e5da0161ad52c3f227a661a3c135fdf28e258b12" dependencies = [ "unicode-xid", ] [[package]] name = "prometheus" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dd0ced56dee39a6e960c15c74dc48849d614586db2eaada6497477af7c7811cd" +checksum = "30d70cf4412832bcac9cffe27906f4a66e450d323525e977168c70d1b36120ae" dependencies = [ "cfg-if", "fnv", "lazy_static", - "spin", + "parking_lot 0.11.0", + "regex", "thiserror", ] @@ -5419,7 +5574,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce49aefe0a6144a45de32927c77bd2859a5f7677b55f220ae5b744e87389c212" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "prost-derive", ] @@ -5429,7 +5584,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02b10678c913ecbd69350e8535c3aef91a8676c0773fc1d7b95cdd196d7f2f26" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "heck", "itertools 0.8.2", "log", @@ -5460,7 +5615,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1834f67c0697c001304b75be76f67add9c89742eda3a085ad8ee0bb38c3417aa" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "prost", ] @@ -5470,7 +5625,7 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f53bc2558e8376358ebdc28301546471d67336584f6438ed4b7c7457a055fd7" dependencies = [ - "byteorder", + "byteorder 1.3.4", "log", "parity-wasm 0.41.0", ] @@ -5487,7 +5642,7 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f" dependencies = [ - "env_logger 0.7.1", + "env_logger", "log", "rand 0.7.3", "rand_core 0.5.1", @@ -5506,9 +5661,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a21852a652ad6f610c9510194f398ff6f8692e334fd1145fed931f7fbe44ea" +checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" dependencies = [ "proc-macro2", ] @@ -5539,7 +5694,7 @@ dependencies = [ "libc", "rand_core 0.3.1", "rdrand", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5548,11 +5703,11 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c618c47cd3ebd209790115ab837de41425723956ad3ce2e6a7f09890947cacb9" dependencies = [ - "cloudabi", + "cloudabi 0.0.3", "fuchsia-cprng", "libc", "rand_core 0.3.1", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5571,7 +5726,7 @@ dependencies = [ "rand_os", "rand_pcg 0.1.2", "rand_xorshift", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5667,7 +5822,7 @@ checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" dependencies = [ "libc", "rand_core 0.4.2", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5676,13 +5831,13 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" dependencies = [ - "cloudabi", + "cloudabi 0.0.3", "fuchsia-cprng", "libc", "rand_core 0.4.2", "rdrand", "wasm-bindgen", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5713,16 +5868,6 @@ dependencies = [ "rand_core 0.3.1", ] -[[package]] -name = "rand_xoshiro" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b418169fb9c46533f326efd6eed2576699c44ca92d3052a066214a8d828929" -dependencies = [ - "byteorder", - "rand_core 0.3.1", -] - [[package]] name = "raw-cpuid" version = "7.0.3" @@ -5742,10 +5887,11 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" [[package]] 
name = "rayon" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db6ce3297f9c85e16621bb8cca38a06779ffc31bb8184e1be4bed2be4678a098" +checksum = "62f02856753d04e03e26929f820d0a0a337ebe71f849801eea335d464b349080" dependencies = [ + "autocfg 1.0.0", "crossbeam-deque", "either", "rayon-core", @@ -5753,9 +5899,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a89b46efaf957e52b18062fb2f4660f8b8a4dde1807ca002690868ef2c85a9" +checksum = "e92e15d89083484e11353891f1af602cc661426deb9564c298b270c726973280" dependencies = [ "crossbeam-deque", "crossbeam-queue", @@ -5775,9 +5921,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "redox_users" @@ -5792,18 +5938,18 @@ dependencies = [ [[package]] name = "ref-cast" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a214c7875e1b63fc1618db7c80efc0954f6156c9ff07699fd9039e255accdd1" +checksum = "745c1787167ddae5569661d5ffb8b25ae5fedbf46717eaa92d652221cec72623" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "602eb59cda66fcb9aec25841fb76bc01d2b34282dcdd705028da297db6f3eec8" +checksum = "7d21b475ab879ef0e315ad99067fa25778c3b0377f57f1b00207448dac1a3144" dependencies = [ "proc-macro2", "quote", @@ -5823,9 +5969,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.3.6" +version = "1.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6946991529684867e47d86474e3a6d0c0ab9b82d5821e314b1ede31fa3a4b3" +checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" dependencies = [ "aho-corasick", "memchr", @@ -5839,15 +5985,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" dependencies = [ - "byteorder", + "byteorder 1.3.4", "regex-syntax", ] [[package]] name = "regex-syntax" -version = "0.6.17" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae" +checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" [[package]] name = "region" @@ -5858,37 +6004,16 @@ dependencies = [ "bitflags", "libc", "mach", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "remove_dir_all" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" -dependencies = [ - "winapi 0.3.8", -] - -[[package]] -name = "rental" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8545debe98b2b139fb04cad8618b530e9b07c152d99a5de83c860b877d67847f" -dependencies = [ - "rental-impl", - "stable_deref_trait", -] - -[[package]] -name = "rental-impl" -version = "0.5.5" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"475e68978dc5b743f2f40d8e0a8fdc83f1c5e78cbf4b8fa5e74e73beebc340de" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "proc-macro2", - "quote", - "syn", + "winapi 0.3.9", ] [[package]] @@ -5899,17 +6024,17 @@ checksum = "e005d658ad26eacc2b6c506dfde519f4e277e328d0eb3379ca61647d70a8f531" [[package]] name = "ring" -version = "0.16.12" +version = "0.16.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba5a8ec64ee89a76c98c549af81ff14813df09c3e6dc4766c3856da48597a0c" +checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" dependencies = [ "cc", - "lazy_static", "libc", + "once_cell 1.4.0", "spin", "untrusted", "web-sys", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5934,9 +6059,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.14.0" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61aa17a99a2413cd71c1106691bf59dad7de0cd5099127f90e9d99c429c40d4a" +checksum = "23d83c02c429044d58474eaf5ae31e062d0de894e21125b47437ec0edc1397e6" dependencies = [ "libc", "librocksdb-sys", @@ -5949,7 +6074,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99371657d3c8e4d816fb6221db98fa408242b0b53bac08f8676a41f8554fe99f" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -6018,9 +6143,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3bba175698996010c4f6dce5e7f173b6eb781fce25d2cfc45e27091ce0b79f6" +checksum = "b9bdc5e856e51e685846fb6c13a1f5e5432946c2c90501bdc76a1319f19e29da" dependencies = [ "proc-macro2", "quote", @@ -6040,9 +6165,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" [[package]] name = "safe-mix" @@ -6053,6 +6178,26 @@ dependencies = [ "rustc_version", ] +[[package]] +name = "salsa20" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2324b0e8c3bb9a586a571fdb3136f70e7e2c748de00a78043f86e0cff91f91fe" +dependencies = [ + "byteorder 1.3.4", + "salsa20-core", + "stream-cipher 0.3.2", +] + +[[package]] +name = "salsa20-core" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fe6cc1b9f5a5867853ade63099de70f042f7679e408d1ffe52821c9248e6e69" +dependencies = [ + "stream-cipher 0.3.2", +] + [[package]] name = "same-file" version = "1.0.6" @@ -6064,11 +6209,11 @@ dependencies = [ [[package]] name = "sc-authority-discovery" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "derive_more", - "env_logger 0.7.1", + "either", "futures 0.3.5", "futures-timer 3.0.2", "libp2p", @@ -6088,13 +6233,14 @@ dependencies = [ "sp-blockchain", "sp-core", "sp-runtime", + "sp-tracing", "substrate-prometheus-endpoint", "substrate-test-runtime-client", ] [[package]] name = "sc-basic-authorship" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", @@ -6120,7 +6266,7 @@ dependencies = [ [[package]] name = "sc-block-builder" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -6138,9 +6284,10 @@ dependencies = 
[ [[package]] name = "sc-chain-spec" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "impl-trait-for-tuples", + "parity-scale-codec", "sc-chain-spec-derive", "sc-network", "sc-telemetry", @@ -6153,7 +6300,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6163,32 +6310,39 @@ dependencies = [ [[package]] name = "sc-cli" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "ansi_term 0.12.1", "atty", + "bip39", "chrono", "derive_more", - "env_logger 0.7.1", "fdlimit", "futures 0.3.5", + "hex", "lazy_static", + "libp2p", "log", "names", "nix", - "parity-util-mem 0.7.0", + "parity-scale-codec", + "parity-util-mem", + "rand 0.7.3", "regex", "rpassword", "sc-client-api", "sc-informant", + "sc-keystore", "sc-network", "sc-service", "sc-telemetry", "sc-tracing", "serde", "serde_json", + "sp-application-crypto", "sp-blockchain", "sp-core", + "sp-io", "sp-keyring", "sp-panic-handler", "sp-runtime", @@ -6199,20 +6353,23 @@ dependencies = [ "substrate-prometheus-endpoint", "tempfile", "time", - "tokio 0.2.21", + "tokio 0.2.22", + "tracing", + "tracing-log", + "tracing-subscriber", ] [[package]] name = "sc-client-api" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "derive_more", "fnv", "futures 0.3.5", "hash-db", "hex-literal", - "kvdb 0.7.0", - "kvdb-memorydb 0.6.0", + "kvdb", + "kvdb-memorydb", "lazy_static", "log", "parity-scale-codec", @@ -6242,19 +6399,18 @@ dependencies = [ [[package]] name = "sc-client-db" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "blake2-rfc", - "env_logger 0.7.1", "hash-db", - "kvdb 0.7.0", - "kvdb-memorydb 0.7.0", + "kvdb", + "kvdb-memorydb", "kvdb-rocksdb", "linked-hash-map", "log", "parity-db", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "parking_lot 0.10.2", "quickcheck", "sc-client-api", @@ -6268,6 +6424,7 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-state-machine", + "sp-tracing", "sp-trie", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -6276,7 +6433,7 @@ dependencies = [ [[package]] name = "sc-consensus" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "sc-client-api", "sp-blockchain", @@ -6286,10 +6443,9 @@ dependencies = [ [[package]] name = "sc-consensus-aura" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "derive_more", - "env_logger 0.7.1", "futures 0.3.5", "futures-timer 3.0.2", "log", @@ -6316,6 +6472,7 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-timestamp", + "sp-tracing", "sp-version", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -6324,10 +6481,9 @@ dependencies = [ [[package]] name = "sc-consensus-babe" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "derive_more", - "env_logger 0.7.1", "fork-tree", "futures 0.3.5", "futures-timer 3.0.2", @@ -6368,6 +6524,7 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-timestamp", + "sp-tracing", "sp-utils", "sp-version", "substrate-prometheus-endpoint", @@ -6377,7 +6534,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "derive_more", "futures 0.3.5", @@ -6405,7 +6562,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "fork-tree", "parity-scale-codec", @@ -6417,11 +6574,10 @@ dependencies = [ [[package]] name = "sc-consensus-manual-seal" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ 
"assert_matches", "derive_more", - "env_logger 0.7.1", "futures 0.3.5", "jsonrpc-core", "jsonrpc-core-client", @@ -6430,29 +6586,37 @@ dependencies = [ "parking_lot 0.10.2", "sc-basic-authorship", "sc-client-api", + "sc-consensus-babe", + "sc-consensus-epochs", + "sc-keystore", "sc-transaction-pool", "serde", + "sp-api", "sp-blockchain", "sp-consensus", + "sp-consensus-babe", "sp-core", "sp-inherents", "sp-runtime", + "sp-timestamp", "sp-transaction-pool", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", "tempfile", - "tokio 0.2.21", + "tokio 0.2.22", ] [[package]] name = "sc-consensus-pow" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "derive_more", "futures 0.3.5", + "futures-timer 3.0.2", "log", "parity-scale-codec", + "parking_lot 0.10.2", "sc-client-api", "sp-api", "sp-block-builder", @@ -6468,7 +6632,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", @@ -6491,7 +6655,7 @@ dependencies = [ [[package]] name = "sc-consensus-uncles" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "log", "sc-client-api", @@ -6504,7 +6668,7 @@ dependencies = [ [[package]] name = "sc-executor" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "assert_matches", "derive_more", @@ -6536,13 +6700,14 @@ dependencies = [ "substrate-test-runtime", "test-case", "tracing", - "wabt", + "tracing-subscriber", "wasmi", + "wat", ] [[package]] name = "sc-executor-common" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "derive_more", "log", @@ -6558,7 +6723,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "log", "parity-scale-codec", @@ -6572,7 +6737,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "assert_matches", "log", @@ -6590,11 +6755,10 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "assert_matches", "derive_more", - "env_logger 0.7.1", "finality-grandpa", "fork-tree", "futures 0.3.5", @@ -6626,16 +6790,17 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-state-machine", + "sp-tracing", "sp-utils", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", - "tokio 0.2.21", + "tokio 0.2.22", ] [[package]] name = "sc-finality-grandpa-rpc" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "derive_more", "finality-grandpa", @@ -6648,6 +6813,7 @@ dependencies = [ "log", "parity-scale-codec", "sc-block-builder", + "sc-client-api", "sc-finality-grandpa", "sc-network-test", "sc-rpc", @@ -6664,12 +6830,12 @@ dependencies = [ [[package]] name = "sc-informant" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "ansi_term 0.12.1", "futures 0.3.5", "log", - "parity-util-mem 0.7.0", + "parity-util-mem", "sc-client-api", "sc-network", "sp-blockchain", @@ -6681,7 +6847,7 @@ dependencies = [ [[package]] name = "sc-keystore" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "derive_more", "hex", @@ -6691,13 +6857,13 @@ dependencies = [ "serde_json", "sp-application-crypto", "sp-core", - "subtle 2.2.2", + "subtle 2.2.3", "tempfile", ] [[package]] name = "sc-light" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "hash-db", "lazy_static", @@ -6715,16 +6881,16 @@ dependencies = [ [[package]] name = "sc-network" -version = "0.8.0-rc5" +version = "0.8.0" 
dependencies = [ "assert_matches", "async-std", + "async-trait", "bitflags", "bs58", - "bytes 0.5.4", + "bytes 0.5.6", "derive_more", "either", - "env_logger 0.7.1", "erased-serde", "fnv", "fork-tree", @@ -6761,6 +6927,7 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-test-primitives", + "sp-tracing", "sp-utils", "substrate-prometheus-endpoint", "substrate-test-runtime", @@ -6775,7 +6942,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "async-std", "futures 0.3.5", @@ -6793,9 +6960,8 @@ dependencies = [ [[package]] name = "sc-network-test" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ - "env_logger 0.7.1", "futures 0.3.5", "futures-timer 3.0.2", "libp2p", @@ -6812,6 +6978,7 @@ dependencies = [ "sp-consensus-babe", "sp-core", "sp-runtime", + "sp-tracing", "substrate-test-runtime", "substrate-test-runtime-client", "tempfile", @@ -6819,14 +6986,13 @@ dependencies = [ [[package]] name = "sc-offchain" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ - "bytes 0.5.4", - "env_logger 0.7.1", + "bytes 0.5.6", "fnv", "futures 0.3.5", "futures-timer 3.0.2", - "hyper 0.13.4", + "hyper 0.13.7", "hyper-rustls", "lazy_static", "log", @@ -6843,16 +7009,17 @@ dependencies = [ "sp-core", "sp-offchain", "sp-runtime", + "sp-tracing", "sp-transaction-pool", "sp-utils", "substrate-test-runtime-client", "threadpool", - "tokio 0.2.21", + "tokio 0.2.22", ] [[package]] name = "sc-peerset" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "futures 0.3.5", "libp2p", @@ -6865,7 +7032,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -6873,7 +7040,7 @@ dependencies = [ [[package]] name = "sc-rpc" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "assert_matches", "futures 0.1.29", @@ -6912,7 +7079,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "derive_more", "futures 0.3.5", @@ -6935,8 +7102,9 @@ dependencies = [ [[package]] name = "sc-rpc-server" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ + "futures 0.1.29", "jsonrpc-core", "jsonrpc-http-server", "jsonrpc-ipc-server", @@ -6946,11 +7114,12 @@ dependencies = [ "serde", "serde_json", "sp-runtime", + "substrate-prometheus-endpoint", ] [[package]] name = "sc-runtime-test" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "sp-allocator", "sp-core", @@ -6963,7 +7132,7 @@ dependencies = [ [[package]] name = "sc-service" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "async-std", "derive_more", @@ -6978,7 +7147,7 @@ dependencies = [ "lazy_static", "log", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "parking_lot 0.10.2", "pin-project", "rand 0.7.3", @@ -7015,6 +7184,7 @@ dependencies = [ "sp-runtime", "sp-session", "sp-state-machine", + "sp-tracing", "sp-transaction-pool", "sp-trie", "sp-utils", @@ -7022,16 +7192,15 @@ dependencies = [ "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", - "tokio 0.2.21", + "tokio 0.2.22", "tracing", "wasm-timer", ] [[package]] name = "sc-service-test" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ - "env_logger 0.7.1", "fdlimit", "futures 0.1.29", "futures 0.3.5", @@ -7055,6 +7224,7 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-storage", + "sp-tracing", "sp-transaction-pool", "sp-trie", "substrate-test-runtime", @@ -7065,12 +7235,11 @@ 
dependencies = [ [[package]] name = "sc-state-db" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ - "env_logger 0.7.1", "log", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "parity-util-mem-derive", "parking_lot 0.10.2", "sc-client-api", @@ -7079,7 +7248,7 @@ dependencies = [ [[package]] name = "sc-telemetry" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", @@ -7099,7 +7268,7 @@ dependencies = [ [[package]] name = "sc-tracing" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "erased-serde", "log", @@ -7111,21 +7280,22 @@ dependencies = [ "slog", "sp-tracing", "tracing", + "tracing-core", "tracing-subscriber", ] [[package]] name = "sc-transaction-graph" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "assert_matches", - "criterion 0.3.1", + "criterion", "derive_more", "futures 0.3.5", "linked-hash-map", "log", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "parking_lot 0.10.2", "retain_mut", "serde", @@ -7140,7 +7310,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "assert_matches", "derive_more", @@ -7150,7 +7320,7 @@ dependencies = [ "intervalier", "log", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "parking_lot 0.10.2", "sc-block-builder", "sc-client-api", @@ -7172,12 +7342,12 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "039c25b130bd8c1321ee2d7de7fde2659fa9c2744e4bb29711cfc852ea53cd19" +checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -7193,8 +7363,8 @@ dependencies = [ "merlin", "rand 0.7.3", "rand_core 0.5.1", - "sha2 0.8.1", - "subtle 2.2.2", + "sha2 0.8.2", + "subtle 2.2.3", "zeroize", ] @@ -7204,6 +7374,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" +[[package]] +name = "scopeguard" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" + [[package]] name = "scopeguard" version = "1.1.0" @@ -7221,9 +7397,9 @@ dependencies = [ [[package]] name = "scroll_derive" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8584eea9b9ff42825b46faf46a8c24d2cff13ec152fa2a50df788b87c07ee28" +checksum = "e367622f934864ffa1c704ba2b82280aab856e3d8213c84c5720257eb34b15b9" dependencies = [ "proc-macro2", "quote", @@ -7286,6 +7462,15 @@ name = "semver" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "394cec28fa623e00903caf7ba4fa6fb9a0e260280bb8cdbbba029611108a0190" dependencies = [ "semver-parser", "serde", @@ -7324,6 +7509,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde_cbor" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e18acfa2f90e8b735b2836ab8d538de304cbb6729a7360729ea5a895d15a622" +dependencies = [ + 
"half", + "serde", +] + [[package]] name = "serde_derive" version = "1.0.114" @@ -7360,9 +7555,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27044adfd2e1f077f649f59deb9490d3941d674002f7d062870a60ebe9bd47a0" +checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" dependencies = [ "block-buffer 0.7.3", "digest 0.8.1", @@ -7505,12 +7700,12 @@ dependencies = [ "futures-io", "futures-util", "libc", - "once_cell", + "once_cell 1.4.0", "scoped-tls", "slab", "socket2", "wepoll-sys-stjepang", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -7527,7 +7722,7 @@ dependencies = [ "ring", "rustc_version", "sha2 0.9.1", - "subtle 2.2.2", + "subtle 2.2.3", "x25519-dalek", ] @@ -7540,7 +7735,7 @@ dependencies = [ "cfg-if", "libc", "redox_syscall", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -7550,7 +7745,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85457366ae0c6ce56bf05a958aef14cd38513c236568618edbcd9a8c52cb80b0" dependencies = [ "base64 0.12.3", - "bytes 0.5.4", + "bytes 0.5.6", "flate2", "futures 0.3.5", "httparse", @@ -7561,7 +7756,7 @@ dependencies = [ [[package]] name = "sp-allocator" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "derive_more", "log", @@ -7572,7 +7767,7 @@ dependencies = [ [[package]] name = "sp-api" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "hash-db", "parity-scale-codec", @@ -7587,7 +7782,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "blake2-rfc", "proc-macro-crate", @@ -7598,9 +7793,9 @@ dependencies = [ [[package]] name = "sp-api-test" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ - "criterion 0.3.1", + "criterion", "parity-scale-codec", "rustversion", "sc-block-builder", @@ -7617,7 +7812,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "parity-scale-codec", "serde", @@ -7628,7 +7823,7 @@ dependencies = [ [[package]] name = "sp-application-crypto-test" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "sp-api", "sp-application-crypto", @@ -7639,9 +7834,9 @@ dependencies = [ [[package]] name = "sp-arithmetic" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ - "criterion 0.3.1", + "criterion", "integer-sqrt", "num-traits", "parity-scale-codec", @@ -7655,7 +7850,7 @@ dependencies = [ [[package]] name = "sp-arithmetic-fuzzer" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "honggfuzz", "num-bigint", @@ -7666,7 +7861,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "parity-scale-codec", "sp-api", @@ -7677,7 +7872,7 @@ dependencies = [ [[package]] name = "sp-authorship" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "parity-scale-codec", "sp-inherents", @@ -7687,7 +7882,7 @@ dependencies = [ [[package]] name = "sp-block-builder" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "parity-scale-codec", "sp-api", @@ -7698,7 +7893,7 @@ dependencies = [ [[package]] name = "sp-blockchain" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "derive_more", "log", @@ -7714,7 +7909,7 @@ dependencies = [ [[package]] name = "sp-chain-spec" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "serde", "serde_json", @@ -7722,7 +7917,7 @@ dependencies = [ [[package]] name = "sp-consensus" 
-version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "derive_more", "futures 0.3.5", @@ -7748,7 +7943,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "parity-scale-codec", "sp-api", @@ -7761,7 +7956,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "merlin", "parity-scale-codec", @@ -7779,7 +7974,7 @@ dependencies = [ [[package]] name = "sp-consensus-pow" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "parity-scale-codec", "sp-api", @@ -7790,7 +7985,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -7798,7 +7993,7 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "parity-scale-codec", "schnorrkel", @@ -7809,12 +8004,12 @@ dependencies = [ [[package]] name = "sp-core" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "base58", "blake2-rfc", - "byteorder", - "criterion 0.2.11", + "byteorder 1.3.4", + "criterion", "derive_more", "dyn-clonable", "ed25519-dalek", @@ -7823,14 +8018,14 @@ dependencies = [ "hash256-std-hasher", "hex", "hex-literal", - "impl-serde 0.3.0", + "impl-serde", "lazy_static", "libsecp256k1", "log", "merlin", "num-traits", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "parking_lot 0.10.2", "pretty_assertions", "primitive-types", @@ -7841,7 +8036,7 @@ dependencies = [ "secrecy", "serde", "serde_json", - "sha2 0.8.1", + "sha2 0.8.2", "sp-debug-derive", "sp-externalities", "sp-runtime-interface", @@ -7850,7 +8045,7 @@ dependencies = [ "sp-storage", "substrate-bip39", "tiny-bip39", - "tiny-keccak 2.0.2", + "tiny-keccak", "twox-hash", "wasmi", "zeroize", @@ -7858,15 +8053,15 @@ dependencies = [ [[package]] name = "sp-database" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ - "kvdb 0.7.0", + "kvdb", "parking_lot 0.10.2", ] [[package]] name = "sp-debug-derive" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "proc-macro2", "quote", @@ -7875,7 +8070,7 @@ dependencies = [ [[package]] name = "sp-externalities" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "environmental", "parity-scale-codec", @@ -7885,7 +8080,7 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "finality-grandpa", "log", @@ -7900,7 +8095,7 @@ dependencies = [ [[package]] name = "sp-finality-tracker" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "parity-scale-codec", "sp-inherents", @@ -7909,7 +8104,7 @@ dependencies = [ [[package]] name = "sp-inherents" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "derive_more", "parity-scale-codec", @@ -7920,7 +8115,7 @@ dependencies = [ [[package]] name = "sp-io" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "futures 0.3.5", "hash-db", @@ -7936,11 +8131,13 @@ dependencies = [ "sp-tracing", "sp-trie", "sp-wasm-interface", + "tracing", + "tracing-core", ] [[package]] name = "sp-keyring" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "lazy_static", "sp-core", @@ -7950,7 +8147,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "parity-scale-codec", "rand 0.7.3", @@ -7964,7 +8161,7 @@ dependencies = [ [[package]] name = "sp-npos-elections-compact" -version = "2.0.0-rc5" +version = "2.0.0" dependencies 
= [ "proc-macro-crate", "proc-macro2", @@ -7977,6 +8174,7 @@ name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" dependencies = [ "honggfuzz", + "parity-scale-codec", "rand 0.7.3", "sp-npos-elections", "sp-runtime", @@ -7985,7 +8183,7 @@ dependencies = [ [[package]] name = "sp-offchain" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "sp-api", "sp-core", @@ -7995,7 +8193,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "backtrace", "log", @@ -8003,7 +8201,7 @@ dependencies = [ [[package]] name = "sp-rpc" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "serde", "serde_json", @@ -8012,14 +8210,14 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "either", "hash256-std-hasher", "impl-trait-for-tuples", "log", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "paste", "rand 0.7.3", "serde", @@ -8035,7 +8233,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "parity-scale-codec", "primitive-types", @@ -8047,6 +8245,7 @@ dependencies = [ "sp-runtime-interface-test-wasm", "sp-state-machine", "sp-std", + "sp-storage", "sp-tracing", "sp-wasm-interface", "static_assertions", @@ -8055,7 +8254,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "Inflector", "proc-macro-crate", @@ -8066,7 +8265,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-test" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "sc-executor", "sp-core", @@ -8077,11 +8276,12 @@ dependencies = [ "sp-runtime-interface-test-wasm-deprecated", "sp-state-machine", "tracing", + "tracing-core", ] [[package]] name = "sp-runtime-interface-test-wasm" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "sp-core", "sp-io", @@ -8092,7 +8292,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-test-wasm-deprecated" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "sp-core", "sp-io", @@ -8103,7 +8303,7 @@ dependencies = [ [[package]] name = "sp-sandbox" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "assert_matches", "parity-scale-codec", @@ -8111,13 +8311,13 @@ dependencies = [ "sp-io", "sp-std", "sp-wasm-interface", - "wabt", "wasmi", + "wat", ] [[package]] name = "sp-serializer" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "serde", "serde_json", @@ -8125,7 +8325,7 @@ dependencies = [ [[package]] name = "sp-session" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "parity-scale-codec", "sp-api", @@ -8137,7 +8337,7 @@ dependencies = [ [[package]] name = "sp-staking" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -8146,11 +8346,10 @@ dependencies = [ [[package]] name = "sp-state-machine" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "hash-db", "hex-literal", - "itertools 0.9.0", "log", "num-traits", "parity-scale-codec", @@ -8162,6 +8361,7 @@ dependencies = [ "sp-externalities", "sp-panic-handler", "sp-runtime", + "sp-std", "sp-trie", "trie-db", "trie-root", @@ -8169,13 +8369,14 @@ dependencies = [ [[package]] name = "sp-std" -version = "2.0.0-rc5" +version = "2.0.0" [[package]] name = "sp-storage" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ - "impl-serde 0.2.3", + "impl-serde", + "parity-scale-codec", "ref-cast", "serde", "sp-debug-derive", @@ -8184,10 
+8385,10 @@ dependencies = [ [[package]] name = "sp-test-primitives" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "serde", "sp-application-crypto", "sp-core", @@ -8196,7 +8397,7 @@ dependencies = [ [[package]] name = "sp-timestamp" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -8209,16 +8410,19 @@ dependencies = [ [[package]] name = "sp-tracing" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "log", - "rental", + "parity-scale-codec", + "sp-std", "tracing", + "tracing-core", + "tracing-subscriber", ] [[package]] name = "sp-transaction-pool" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "derive_more", "futures 0.3.5", @@ -8232,9 +8436,9 @@ dependencies = [ [[package]] name = "sp-trie" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ - "criterion 0.2.11", + "criterion", "hash-db", "hex-literal", "memory-db", @@ -8250,7 +8454,7 @@ dependencies = [ [[package]] name = "sp-utils" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "futures 0.3.5", "futures-core", @@ -8261,9 +8465,9 @@ dependencies = [ [[package]] name = "sp-version" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ - "impl-serde 0.2.3", + "impl-serde", "parity-scale-codec", "serde", "sp-runtime", @@ -8272,7 +8476,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -8288,9 +8492,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "stable_deref_trait" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "static_assertions" @@ -8342,9 +8546,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6da2e8d107dfd7b74df5ef4d205c6aebee0706c647f6bc6a2d5789905c00fb" +checksum = "de2f5e239ee807089b62adce73e48c625e0ed80df02c7ab3f068f5db5281065c" dependencies = [ "clap", "lazy_static", @@ -8353,9 +8557,9 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a489c87c08fbaf12e386665109dd13470dcc9c4583ea3e10dd2b4523e5ebd9ac" +checksum = "510413f9de616762a4fbeab62509bf15c729603b72d7cd71280fbca431b1c118" dependencies = [ "heck", "proc-macro-error", @@ -8387,49 +8591,33 @@ dependencies = [ [[package]] name = "subkey" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ - "clap", - "derive_more", "frame-system", - "futures 0.1.29", - "hex", - "hex-literal", - "hyper 0.12.35", - "itertools 0.8.2", - "jsonrpc-core-client", - "libp2p", "node-primitives", "node-runtime", - "pallet-balances", - "pallet-grandpa", - "pallet-transaction-payment", - "parity-scale-codec", - "rand 0.7.3", - "rpassword", - "sc-rpc", - "serde_json", + "sc-cli", "sp-core", - "sp-runtime", - "substrate-bip39", - "tiny-bip39", + "structopt", + "substrate-frame-cli", ] [[package]] name = "substrate-bip39" -version = "0.4.1" +version = "0.4.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c004e8166d6e0aa3a9d5fa673e5b7098ff25f930de1013a21341988151e681bb" +checksum = "bed6646a0159b9935b5d045611560eeef842b78d7adc3ba36f5ca325a13a0236" dependencies = [ "hmac", "pbkdf2", "schnorrkel", - "sha2 0.8.1", + "sha2 0.8.2", + "zeroize", ] [[package]] name = "substrate-browser-utils" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "chrono", "console_error_panic_hook", @@ -8454,14 +8642,25 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "platforms", ] +[[package]] +name = "substrate-frame-cli" +version = "2.0.0" +dependencies = [ + "frame-system", + "sc-cli", + "sp-core", + "sp-runtime", + "structopt", +] + [[package]] name = "substrate-frame-rpc-support" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "frame-support", "frame-system", @@ -8472,14 +8671,13 @@ dependencies = [ "sc-rpc-api", "serde", "sp-storage", - "tokio 0.2.21", + "tokio 0.2.22", ] [[package]] name = "substrate-frame-rpc-system" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ - "env_logger 0.7.1", "frame-system-rpc-runtime-api", "futures 0.3.5", "jsonrpc-core", @@ -8496,26 +8694,27 @@ dependencies = [ "sp-blockchain", "sp-core", "sp-runtime", + "sp-tracing", "sp-transaction-pool", "substrate-test-runtime-client", ] [[package]] name = "substrate-prometheus-endpoint" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "async-std", "derive_more", "futures-util", - "hyper 0.13.4", + "hyper 0.13.7", "log", "prometheus", - "tokio 0.2.21", + "tokio 0.2.22", ] [[package]] name = "substrate-test-client" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "futures 0.1.29", "futures 0.3.5", @@ -8540,7 +8739,7 @@ dependencies = [ [[package]] name = "substrate-test-runtime" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "cfg-if", "frame-executive", @@ -8552,7 +8751,7 @@ dependencies = [ "pallet-babe", "pallet-timestamp", "parity-scale-codec", - "parity-util-mem 0.7.0", + "parity-util-mem", "sc-block-builder", "sc-executor", "sc-service", @@ -8563,6 +8762,7 @@ dependencies = [ "sp-consensus-aura", "sp-consensus-babe", "sp-core", + "sp-externalities", "sp-finality-grandpa", "sp-inherents", "sp-io", @@ -8583,7 +8783,7 @@ dependencies = [ [[package]] name = "substrate-test-runtime-client" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "futures 0.3.5", "parity-scale-codec", @@ -8603,7 +8803,7 @@ dependencies = [ [[package]] name = "substrate-test-runtime-transaction-pool" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "derive_more", "futures 0.3.5", @@ -8618,18 +8818,18 @@ dependencies = [ [[package]] name = "substrate-test-utils" -version = "2.0.0-rc5" +version = "2.0.0" dependencies = [ "futures 0.3.5", "sc-service", "substrate-test-utils-derive", - "tokio 0.2.21", + "tokio 0.2.22", "trybuild", ] [[package]] name = "substrate-test-utils-derive" -version = "0.8.0-rc5" +version = "0.8.0" dependencies = [ "proc-macro-crate", "quote", @@ -8642,13 +8842,14 @@ version = "0.1.0" dependencies = [ "sc-service", "substrate-test-utils", - "tokio 0.2.21", + "tokio 0.2.22", ] [[package]] name = "substrate-wasm-builder" version = "2.0.0" dependencies = [ + "ansi_term 0.12.1", "atty", "build-helper", "cargo_metadata", @@ -8672,15 +8873,15 @@ checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" [[package]] name = "subtle" -version = "2.2.2" +version = "2.2.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c65d530b10ccaeac294f349038a597e435b18fb456aadd0840a623f83b9e941" +checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" [[package]] name = "syn" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d5d96e8cbb005d6959f119f773bfaebb5684296108fb32600c00cde305b2cd" +checksum = "fb7f4c519df8c117855e19dd8cc851e89eb746fe7a73f0157e0d95fdec5369b0" dependencies = [ "proc-macro2", "quote", @@ -8700,9 +8901,9 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" +checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" dependencies = [ "proc-macro2", "quote", @@ -8733,7 +8934,7 @@ dependencies = [ "rand 0.7.3", "redox_syscall", "remove_dir_all", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -8769,18 +8970,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.15" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b3d3d2ff68104100ab257bb6bb0cb26c901abe4bd4ba15961f3bf867924012" +checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.15" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca972988113b7715266f91250ddb98070d033c62a011fa0fcc57434a649310dd" +checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793" dependencies = [ "proc-macro2", "quote", @@ -8798,22 +8999,21 @@ dependencies = [ [[package]] name = "threadpool" -version = "1.7.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2f0c90a5f3459330ac8bc0d2f879c693bb7a2f59689c1083fc4ef83834da865" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" dependencies = [ "num_cpus", ] [[package]] name = "time" -version = "0.1.42" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "redox_syscall", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -8824,23 +9024,14 @@ checksum = "b0165e045cc2ae1660270ca65e1676dbaab60feb0f91b10f7d0665e9b47e31f2" dependencies = [ "failure", "hmac", - "once_cell", + "once_cell 1.4.0", "pbkdf2", "rand 0.7.3", "rustc-hash", - "sha2 0.8.1", + "sha2 0.8.2", "unicode-normalization", ] -[[package]] -name = "tiny-keccak" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d8a021c69bb74a44ccedb824a046447e2c84a01df9e5c20779750acb38e11b2" -dependencies = [ - "crunchy", -] - [[package]] name = "tiny-keccak" version = "2.0.2" @@ -8852,14 +9043,20 @@ dependencies = [ [[package]] name = "tinytemplate" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a3c6667d3e65eb1bc3aed6fd14011c6cbc3a0665218ab7f5daf040b9ec371a" +checksum = "6d3dc76004a03cec1c5932bca4cdc2e39aaa798e3f82363dd94f9adf6098c12f" dependencies = [ "serde", "serde_json", ] +[[package]] +name = "tinyvec" +version = "0.3.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" + [[package]] name = "tokio" version = "0.1.22" @@ -8886,11 +9083,11 @@ dependencies = [ [[package]] name = "tokio" -version = "0.2.21" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d099fa27b9702bed751524694adbe393e18b36b204da91eb1cbbbbb4a5ee2d58" +checksum = "5d34ca54d84bf2b5b4d7d31e901a8464f7b60ac145a284fba25ceb801f2ddccd" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "fnv", "futures-core", "iovec", @@ -8904,7 +9101,7 @@ dependencies = [ "signal-hook-registry", "slab", "tokio-macros", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -9033,7 +9230,7 @@ checksum = "228139ddd4fea3fa345a29233009635235833e52807af7ea6448ead03890d6a9" dependencies = [ "futures-core", "rustls", - "tokio 0.2.21", + "tokio 0.2.22", "webpki", ] @@ -9127,9 +9324,9 @@ dependencies = [ [[package]] name = "tokio-uds" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5076db410d6fdc6523df7595447629099a1fdc47b3d9f896220780fa48faf798" +checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" dependencies = [ "bytes 0.4.12", "futures 0.1.29", @@ -9149,12 +9346,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "futures-core", "futures-sink", "log", "pin-project-lite", - "tokio 0.2.21", + "tokio 0.2.22", ] [[package]] @@ -9174,20 +9371,21 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0aae59226cf195d8e74d4b34beae1859257efb4e5fed3f147d2dc2c7d372178" +checksum = "6d79ca061b032d6ce30c660fded31189ca0b9922bf483cd70759f13a2d86786c" dependencies = [ "cfg-if", + "log", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0693bf8d6f2bf22c690fc61a9d21ac69efdbb894a17ed596b9af0f01e64b84b" +checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" dependencies = [ "proc-macro2", "quote", @@ -9196,9 +9394,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.12" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2734b5a028fa697686f16c6d18c2c6a3c7e41513f9a213abb6754c4acb3c8d7" +checksum = "5bcf46c1f1f06aeea2d6b81f3c863d0930a596c86ad1920d4e5bad6dd1d7119a" dependencies = [ "lazy_static", ] @@ -9252,11 +9450,11 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24987a413863acfa081fb75051d0c2824cd4c450e2f0a7e03dca93ac989775fc" +checksum = "af2cc37cac8cc158119982c920cbb9b8243d8540c1d13b8aca84484bfc83a426" dependencies = [ - "criterion 0.2.11", + "criterion", "hash-db", "keccak-hasher", "memory-db", @@ -9273,7 +9471,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39f1a9a9252d38c5337cf0c5392988821a5cf1b2103245016968f2ab41de9e38" dependencies = [ "hash-db", - "hashbrown 0.8.0", + "hashbrown 
0.8.1", "log", "rustc-hex", "smallvec 1.4.1", @@ -9300,18 +9498,18 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "trybuild" -version = "1.0.25" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "459186ab1afd6d93bd23c2269125f4f7694f8771fe0e64434b4bdc212b94034d" +checksum = "b7d30fe369fd650072b352b1a9cb9587669de6b89be3b8225544012c1c45292d" dependencies = [ "dissimilar", - "glob 0.3.0", + "glob", "lazy_static", "serde", "serde_json", @@ -9319,17 +9517,6 @@ dependencies = [ "toml", ] -[[package]] -name = "twofish" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712d261e83e727c8e2dbb75dacac67c36e35db36a958ee504f2164fc052434e1" -dependencies = [ - "block-cipher-trait", - "byteorder", - "opaque-debug 0.2.3", -] - [[package]] name = "twox-hash" version = "1.5.0" @@ -9347,11 +9534,11 @@ checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" [[package]] name = "uint" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e75a4cdd7b87b28840dba13c483b9a88ee6bbf16ba5c951ee1ecfcf723078e0d" +checksum = "173cd16430c206dc1a430af8a89a0e9c076cf15cb42b4aedb10e8cc8fee73681" dependencies = [ - "byteorder", + "byteorder 1.3.4", "crunchy", "rustc-hex", "static_assertions", @@ -9377,11 +9564,11 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" +checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" dependencies = [ - "smallvec 1.4.1", + "tinyvec", ] [[package]] @@ -9392,15 +9579,15 @@ checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" [[package]] name = "unicode-width" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479" +checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" [[package]] name = "unicode-xid" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" [[package]] name = "universal-hash" @@ -9409,7 +9596,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" dependencies = [ "generic-array 0.14.3", - "subtle 2.2.2", + "subtle 2.2.3", ] [[package]] @@ -9424,17 +9611,27 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "669d776983b692a906c881fcd0cfb34271a48e197e4d6cb8df32b05bfc3d3fa5" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.6", "futures-io", "futures-util", "futures_codec", ] +[[package]] +name = "unsigned-varint" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" +dependencies = [ + "futures-io", + 
"futures-util", +] + [[package]] name = "untrusted" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60369ef7a31de49bcb3f6ca728d4ba7300d9a1658f94c727d4cab8c8d9f4aece" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" @@ -9460,21 +9657,21 @@ dependencies = [ [[package]] name = "vcpkg" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168" +checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" [[package]] name = "vec_map" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" +checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" [[package]] name = "void" @@ -9482,29 +9679,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" -[[package]] -name = "wabt" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5c5c1286c6e578416982609f47594265f9d489f9b836157d403ad605a46693" -dependencies = [ - "serde", - "serde_derive", - "serde_json", - "wabt-sys", -] - -[[package]] -name = "wabt-sys" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d7043ebb3e5d96fad7a8d3ca22ee9880748ff8c3e18092cfb2a49d3b8f9084" -dependencies = [ - "cc", - "cmake", - "glob 0.2.11", -] - [[package]] name = "wait-timeout" version = "0.2.0" @@ -9527,7 +9701,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" dependencies = [ "same-file", - "winapi 0.3.8", + "winapi 0.3.9", "winapi-util", ] @@ -9587,9 +9761,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7add542ea1ac7fdaa9dc25e031a6af33b7d63376292bd24140c637d00d1c312a" +checksum = "8a369c5e1dfb7569e14d62af4da642a3cbc2f9a3652fe586e26ac22222aa4b04" dependencies = [ "cfg-if", "js-sys", @@ -9628,9 +9802,9 @@ checksum = "93b162580e34310e5931c4b792560108b10fd14d64915d7fff8ff00180e70092" [[package]] name = "wasm-bindgen-test" -version = "0.3.10" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "648da3460c6d2aa04b715a936329e2e311180efe650b2127d6267f4193ccac14" +checksum = "fd8e9dad8040e378f0696b017570c6bc929aac373180e06b3d67ac5059c52da3" dependencies = [ "console_error_panic_hook", "js-sys", @@ -9642,9 +9816,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.10" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2f86cd78a2aa7b1fb4bb6ed854eccb7f9263089c79542dca1576a1518a8467" +checksum = "c358c8d2507c1bae25efa069e62ea907aa28700b25c8c33dafb0b15ba4603627" dependencies = [ "proc-macro2", "quote", @@ -9735,7 +9909,7 @@ dependencies 
= [ "wasmtime-profiling", "wasmtime-runtime", "wat", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -9777,11 +9951,11 @@ dependencies = [ "more-asserts", "rayon", "serde", - "sha2 0.8.1", + "sha2 0.8.2", "thiserror", "toml", "wasmparser 0.59.0", - "winapi 0.3.8", + "winapi 0.3.9", "zstd", ] @@ -9811,7 +9985,7 @@ dependencies = [ "wasmtime-obj", "wasmtime-profiling", "wasmtime-runtime", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -9865,7 +10039,7 @@ dependencies = [ "region", "thiserror", "wasmtime-environ", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -9888,9 +10062,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.37" +version = "0.3.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d6f51648d8c56c366144378a33290049eafdd784071077f6fe37dae64c1c4cb" +checksum = "8bc359e5dd3b46cb9687a051d50a2fdd228e4ba7cf6fcf861a5365c3d671a642" dependencies = [ "js-sys", "wasm-bindgen", @@ -9898,9 +10072,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.2" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f50e1972865d6b1adb54167d1c8ed48606004c2c9d0ea5f1eeb34d95e863ef" +checksum = "ab146130f5f790d45f82aeeb09e55a256573373ec64409fc19a6fb82fb1032ae" dependencies = [ "ring", "untrusted", @@ -9950,9 +10124,9 @@ checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" [[package]] name = "winapi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", @@ -9972,11 +10146,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa515c5163a99cc82bab70fd3bfdd36d827be85de63737b40fcef2ce084a436e" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -9985,24 +10159,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "ws" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c51a2c47b5798ccc774ffb93ff536aec7c4275d722fd9c740c83cdd1af1f2d94" -dependencies = [ - "byteorder", - "bytes 0.4.12", - "httparse", - "log", - "mio", - "mio-extras", - "rand 0.7.3", - "sha-1", - "slab", - "url 2.1.1", -] - [[package]] name = "ws2_32-sys" version = "0.2.1" @@ -10026,14 +10182,14 @@ dependencies = [ [[package]] name = "yamux" -version = "0.4.7" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd37e58a1256a0b328ce9c67d8b62ecdd02f4803ba443df478835cb1a41a637c" +checksum = "9aeb8c4043cac71c3c299dff107171c220d179492350ea198e109a414981b83c" dependencies = [ "futures 0.3.5", "log", "nohash-hasher", - "parking_lot 0.10.2", + "parking_lot 0.11.0", "rand 0.7.3", "static_assertions", ] @@ -10061,18 +10217,18 @@ dependencies = [ [[package]] name = "zstd" -version = "0.5.1+zstd.1.4.4" +version = "0.5.3+zstd.1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5c5d978b793ae64375b80baf652919b148f6a496ac8802922d9999f5a553194f" +checksum = "01b32eaf771efa709e8308605bbf9319bf485dc1503179ec0469b611937c0cd8" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "2.0.3+zstd.1.4.4" +version = "2.0.5+zstd.1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee25eac9753cfedd48133fa1736cbd23b774e253d89badbeac7d12b23848d3f" +checksum = "1cfb642e0d27f64729a639c52db457e0ae906e7bc6f5fe8f5c453230400f1055" dependencies = [ "libc", "zstd-sys", @@ -10080,11 +10236,12 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.4.15+zstd.1.4.4" +version = "1.4.17+zstd.1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89719b034dc22d240d5b407fb0a3fe6d29952c181cff9a9f95c0bd40b4f8f7d8" +checksum = "b89249644df056b522696b1bb9e7c18c87e8ffa3e2f0dc3b0155875d6498f01b" dependencies = [ "cc", - "glob 0.3.0", + "glob", + "itertools 0.9.0", "libc", ] diff --git a/Cargo.toml b/Cargo.toml index f22e3427a70a1eaced1ec29eaf72af859078b09c..99b1d418a5153c19ab2dc3a39d233c43fc7c8b7c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -66,7 +66,6 @@ members = [ "frame/babe", "frame/balances", "frame/benchmarking", - "frame/benchmark", "frame/collective", "frame/contracts", "frame/contracts/rpc", @@ -79,7 +78,6 @@ members = [ "frame/example-offchain-worker", "frame/executive", "frame/finality-tracker", - "frame/generic-asset", "frame/grandpa", "frame/identity", "frame/im-online", @@ -88,6 +86,7 @@ members = [ "frame/metadata", "frame/multisig", "frame/nicks", + "frame/node-authorization", "frame/offences", "frame/proxy", "frame/randomness-collective-flip", @@ -180,6 +179,7 @@ members = [ "utils/build-script-utils", "utils/fork-tree", "utils/frame/benchmarking-cli", + "utils/frame/frame-utilities-cli", "utils/frame/rpc/support", "utils/frame/rpc/system", "utils/wasm-builder", @@ -201,7 +201,6 @@ members = [ # # This list is ordered alphabetically. [profile.dev.package] -aes-ctr = { opt-level = 3 } aes-soft = { opt-level = 3 } aesni = { opt-level = 3 } blake2 = { opt-level = 3 } @@ -215,7 +214,6 @@ crc32fast = { opt-level = 3 } crossbeam-deque = { opt-level = 3 } crossbeam-queue = { opt-level = 3 } crypto-mac = { opt-level = 3 } -ctr = { opt-level = 3 } curve25519-dalek = { opt-level = 3 } ed25519-dalek = { opt-level = 3 } evm-core = { opt-level = 3 } diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 0c988ebd1a22be49a9e820ed34dedfd01df27ece..8b1a47fd2bf15740e36a28692510985b4d7a28e1 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-template" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Anonymous"] description = "A new FRAME-based Substrate node, ready for hacking." 
edition = "2018" @@ -18,34 +18,44 @@ name = "node-template" [dependencies] structopt = "0.3.8" -sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli", features = ["wasmtime"] } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sc-executor = { version = "0.8.0-rc5", path = "../../../client/executor", features = ["wasmtime"] } -sc-service = { version = "0.8.0-rc5", path = "../../../client/service", features = ["wasmtime"] } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } -sc-transaction-pool = { version = "2.0.0-rc5", path = "../../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../primitives/transaction-pool" } -sc-consensus-aura = { version = "0.8.0-rc5", path = "../../../client/consensus/aura" } -sp-consensus-aura = { version = "0.8.0-rc5", path = "../../../primitives/consensus/aura" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sc-consensus = { version = "0.8.0-rc5", path = "../../../client/consensus/common" } -sc-finality-grandpa = { version = "0.8.0-rc5", path = "../../../client/finality-grandpa" } -sp-finality-grandpa = { version = "2.0.0-rc5", path = "../../../primitives/finality-grandpa" } -sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } +sc-cli = { version = "0.8.0", path = "../../../client/cli", features = ["wasmtime"] } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sc-executor = { version = "0.8.0", path = "../../../client/executor", features = ["wasmtime"] } +sc-service = { version = "0.8.0", path = "../../../client/service", features = ["wasmtime"] } +sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } +sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } +sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } +sc-consensus-aura = { version = "0.8.0", path = "../../../client/consensus/aura" } +sp-consensus-aura = { version = "0.8.0", path = "../../../primitives/consensus/aura" } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } +sc-finality-grandpa = { version = "0.8.0", path = "../../../client/finality-grandpa" } +sp-finality-grandpa = { version = "2.0.0", path = "../../../primitives/finality-grandpa" } +sc-client-api = { version = "2.0.0", path = "../../../client/api" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } # These dependencies are used for the node template's RPCs -jsonrpc-core = "14.0.3" -sc-rpc = { version = "2.0.0-rc5", path = "../../../client/rpc" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sc-rpc-api = { version = "0.8.0-rc5", path = "../../../client/rpc-api" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../../primitives/block-builder" } -sc-basic-authorship = { version = "0.8.0-rc5", path = "../../../client/basic-authorship" } -substrate-frame-rpc-system = { version = "2.0.0-rc5", path = "../../../utils/frame/rpc/system" } -pallet-transaction-payment-rpc = { version = "2.0.0-rc5", path = "../../../frame/transaction-payment/rpc/" } - -node-template-runtime = { version = "2.0.0-rc5", path = "../runtime" } +jsonrpc-core = "15.0.0" 
+sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } +sp-api = { version = "2.0.0", path = "../../../primitives/api" } +sc-rpc-api = { version = "0.8.0", path = "../../../client/rpc-api" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } +sc-basic-authorship = { version = "0.8.0", path = "../../../client/basic-authorship" } +substrate-frame-rpc-system = { version = "2.0.0", path = "../../../utils/frame/rpc/system" } +pallet-transaction-payment-rpc = { version = "2.0.0", path = "../../../frame/transaction-payment/rpc/" } + +# These dependencies are used for runtime benchmarking +frame-benchmarking = { version = "2.0.0", path = "../../../frame/benchmarking" } +frame-benchmarking-cli = { version = "2.0.0", path = "../../../utils/frame/benchmarking-cli" } + +node-template-runtime = { version = "2.0.0", path = "../runtime" } [build-dependencies] -substrate-build-script-utils = { version = "2.0.0-rc5", path = "../../../utils/build-script-utils" } +substrate-build-script-utils = { version = "2.0.0", path = "../../../utils/build-script-utils" } + +[features] +default = [] +runtime-benchmarks = [ + "node-template-runtime/runtime-benchmarks", +] diff --git a/bin/node-template/node/src/cli.rs b/bin/node-template/node/src/cli.rs index 0091ef7d75912f736c1cbffde84d29e17186038c..f2faf17e4ddf44b724292d014d542b63b594b56f 100644 --- a/bin/node-template/node/src/cli.rs +++ b/bin/node-template/node/src/cli.rs @@ -1,5 +1,5 @@ -use sc_cli::{RunCmd, Subcommand}; use structopt::StructOpt; +use sc_cli::RunCmd; #[derive(Debug, StructOpt)] pub struct Cli { @@ -9,3 +9,31 @@ pub struct Cli { #[structopt(flatten)] pub run: RunCmd, } + +#[derive(Debug, StructOpt)] +pub enum Subcommand { + /// Build a chain specification. + BuildSpec(sc_cli::BuildSpecCmd), + + /// Validate blocks. + CheckBlock(sc_cli::CheckBlockCmd), + + /// Export blocks. + ExportBlocks(sc_cli::ExportBlocksCmd), + + /// Export the state of a given block into a chain spec. + ExportState(sc_cli::ExportStateCmd), + + /// Import blocks. + ImportBlocks(sc_cli::ImportBlocksCmd), + + /// Remove the whole chain. + PurgeChain(sc_cli::PurgeChainCmd), + + /// Revert the chain to a previous state. + Revert(sc_cli::RevertCmd), + + /// The custom benchmark subcommmand benchmarking runtime pallets. + #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] + Benchmark(frame_benchmarking_cli::BenchmarkCmd), +} diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index b3f1cfaf11f5592196a6bd9da9ff80d257690b85..2efca0383710844d3a9d1cf6d0ae5851c7005f60 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -15,12 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::chain_spec; -use crate::cli::Cli; -use crate::service; +use crate::{chain_spec, service}; +use crate::cli::{Cli, Subcommand}; use sc_cli::{SubstrateCli, RuntimeVersion, Role, ChainSpec}; use sc_service::PartialComponents; -use crate::service::new_partial; +use node_template_runtime::Block; impl SubstrateCli for Cli { fn impl_name() -> String { @@ -67,14 +66,64 @@ pub fn run() -> sc_cli::Result<()> { let cli = Cli::from_args(); match &cli.subcommand { - Some(subcommand) => { - let runner = cli.create_runner(subcommand)?; - runner.run_subcommand(subcommand, |config| { - let PartialComponents { client, backend, task_manager, import_queue, .. } - = new_partial(&config)?; - Ok((client, backend, import_queue, task_manager)) + Some(Subcommand::BuildSpec(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) + }, + Some(Subcommand::CheckBlock(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, import_queue, ..} + = service::new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) }) - } + }, + Some(Subcommand::ExportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, ..} + = service::new_partial(&config)?; + Ok((cmd.run(client, config.database), task_manager)) + }) + }, + Some(Subcommand::ExportState(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, ..} + = service::new_partial(&config)?; + Ok((cmd.run(client, config.chain_spec), task_manager)) + }) + }, + Some(Subcommand::ImportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, import_queue, ..} + = service::new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + }, + Some(Subcommand::PurgeChain(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.database)) + }, + Some(Subcommand::Revert(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, backend, ..} + = service::new_partial(&config)?; + Ok((cmd.run(client, backend), task_manager)) + }) + }, + Some(Subcommand::Benchmark(cmd)) => { + if cfg!(feature = "runtime-benchmarks") { + let runner = cli.create_runner(cmd)?; + + runner.sync_run(|config| cmd.run::<Block, service::Executor>(config)) + } else { + Err("Benchmarking wasn't enabled when building the node. 
\ + You can enable it with `--features runtime-benchmarks`.".into()) + } + }, None => { let runner = cli.create_runner(&cli.run)?; runner.run_node_until_exit(|config| match config.role { diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 4eba4fdd093e9b508128ca757742a6c867ddd8bb..3de31dc61ab5175a36838bc01bf3c500619d647b 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -16,6 +16,7 @@ native_executor_instance!( pub Executor, node_template_runtime::api::dispatch, node_template_runtime::native_version, + frame_benchmarking::benchmarking::HostFunctions, ); type FullClient = sc_service::TFullClient; @@ -27,7 +28,12 @@ pub fn new_partial(config: &Configuration) -> Result, sc_transaction_pool::FullPool, ( - sc_finality_grandpa::GrandpaBlockImport, + sc_consensus_aura::AuraBlockImport< + Block, + FullClient, + sc_finality_grandpa::GrandpaBlockImport, + AuraPair + >, sc_finality_grandpa::LinkHalf ) >, ServiceError> { @@ -54,21 +60,22 @@ pub fn new_partial(config: &Configuration) -> Result( + let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( sc_consensus_aura::slot_duration(&*client)?, - aura_block_import, + aura_block_import.clone(), Some(Box::new(grandpa_block_import.clone())), None, client.clone(), inherent_data_providers.clone(), &task_manager.spawn_handle(), config.prometheus_registry(), + sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), )?; Ok(sc_service::PartialComponents { client, backend, task_manager, import_queue, keystore, select_chain, transaction_pool, inherent_data_providers, - other: (grandpa_block_import, grandpa_link), + other: (aura_block_import, grandpa_link), }) } @@ -240,7 +247,7 @@ pub fn new_light(config: Configuration) -> Result { let finality_proof_request_builder = finality_proof_import.create_finality_proof_request_builder(); - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _>( + let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( sc_consensus_aura::slot_duration(&*client)?, grandpa_block_import, None, @@ -249,6 +256,7 @@ pub fn new_light(config: Configuration) -> Result { InherentDataProviders::new(), &task_manager.spawn_handle(), config.prometheus_registry(), + sp_consensus::NeverCanAuthor, )?; let finality_proof_provider = diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index 6d8868386e38cf18d4a0ee29b907062affa15610..12b810de186f42627f30e8fc0d33a55ffd20cd6f 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -2,11 +2,12 @@ authors = ['Anonymous'] edition = '2018' name = 'pallet-template' -version = "2.0.0-rc5" +version = "2.0.0" license = "Unlicense" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet template for defining custom runtime logic." 
+readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,27 +17,27 @@ codec = { package = "parity-scale-codec", version = "1.3.4", default-features = [dependencies.frame-support] default-features = false -version = "2.0.0-rc5" +version = "2.0.0" path = "../../../../frame/support" [dependencies.frame-system] default-features = false -version = "2.0.0-rc5" +version = "2.0.0" path = "../../../../frame/system" [dev-dependencies.sp-core] default-features = false -version = "2.0.0-rc5" +version = "2.0.0" path = "../../../../primitives/core" [dev-dependencies.sp-io] default-features = false -version = "2.0.0-rc5" +version = "2.0.0" path = "../../../../primitives/io" [dev-dependencies.sp-runtime] default-features = false -version = "2.0.0-rc5" +version = "2.0.0" path = "../../../../primitives/runtime" diff --git a/bin/node-template/pallets/template/README.md b/bin/node-template/pallets/template/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8d751a42207ddd3f2d4d361248077a468b64f36a --- /dev/null +++ b/bin/node-template/pallets/template/README.md @@ -0,0 +1 @@ +License: Unlicense \ No newline at end of file diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index 8c3bf2b40473c81b2fa946b3fae86d250f448c3f..a3dff240e4847d518343507f084d6ca4339f7a43 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -42,7 +42,7 @@ impl system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index f4e8697a47d0d43cf14937c685dd50c898ccc103..393578f8d295d96bcc84b3ecf11ac5eec8a3c97d 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-template-runtime" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Anonymous"] edition = "2018" license = "Unlicense" @@ -13,34 +13,39 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -pallet-aura = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/aura" } -pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/balances" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/support" } -pallet-grandpa = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/grandpa" } -pallet-randomness-collective-flip = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/randomness-collective-flip" } -pallet-sudo = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/sudo" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system" } -pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/transaction-payment" } -frame-executive = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/executive" } +pallet-aura = { version = "2.0.0", default-features = false, path = 
"../../../frame/aura" } +pallet-balances = { version = "2.0.0", default-features = false, path = "../../../frame/balances" } +frame-support = { version = "2.0.0", default-features = false, path = "../../../frame/support" } +pallet-grandpa = { version = "2.0.0", default-features = false, path = "../../../frame/grandpa" } +pallet-randomness-collective-flip = { version = "2.0.0", default-features = false, path = "../../../frame/randomness-collective-flip" } +pallet-sudo = { version = "2.0.0", default-features = false, path = "../../../frame/sudo" } +frame-system = { version = "2.0.0", default-features = false, path = "../../../frame/system" } +pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "2.0.0", default-features = false, path = "../../../frame/transaction-payment" } +frame-executive = { version = "2.0.0", default-features = false, path = "../../../frame/executive" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/api" } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0-rc5"} -sp-consensus-aura = { version = "0.8.0-rc5", default-features = false, path = "../../../primitives/consensus/aura" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/core" } -sp-inherents = { path = "../../../primitives/inherents", default-features = false, version = "2.0.0-rc5"} -sp-offchain = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/offchain" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } -sp-session = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/session" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } -sp-transaction-pool = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/version" } +sp-api = { version = "2.0.0", default-features = false, path = "../../../primitives/api" } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0"} +sp-consensus-aura = { version = "0.8.0", default-features = false, path = "../../../primitives/consensus/aura" } +sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } +sp-inherents = { path = "../../../primitives/inherents", default-features = false, version = "2.0.0"} +sp-offchain = { version = "2.0.0", default-features = false, path = "../../../primitives/offchain" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-session = { version = "2.0.0", default-features = false, path = "../../../primitives/session" } +sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } +sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "2.0.0", default-features = false, path = "../../../primitives/version" } # Used for the node template's RPCs -frame-system-rpc-runtime-api = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } 
-pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } +frame-system-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } -template = { version = "2.0.0-rc5", default-features = false, path = "../pallets/template", package = "pallet-template" } +# Used for runtime benchmarking +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../../frame/benchmarking", optional = true } +frame-system-benchmarking = { version = "2.0.0", default-features = false, path = "../../../frame/system/benchmarking", optional = true } +hex-literal = { version = "0.3.1", optional = true } + +template = { version = "2.0.0", default-features = false, path = "../pallets/template", package = "pallet-template" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } @@ -58,7 +63,7 @@ std = [ "pallet-sudo/std", "pallet-timestamp/std", "pallet-transaction-payment/std", - "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment-rpc-runtime-api/std", "serde", "sp-api/std", "sp-block-builder/std", @@ -72,6 +77,16 @@ std = [ "sp-transaction-pool/std", "sp-version/std", "frame-system/std", - "frame-system-rpc-runtime-api/std", + "frame-system-rpc-runtime-api/std", "template/std", ] +runtime-benchmarks = [ + "sp-runtime/runtime-benchmarks", + "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking", + "hex-literal", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", +] diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 06e34e4551673b69dacb667de0376e99ee36e12b..e96de63731745c5613528ac5bf6b468caf22290e 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -181,7 +181,7 @@ impl frame_system::Trait for Runtime { /// Converts a module to the index of the module in `construct_runtime!`. /// /// This type is being generated by `construct_runtime!`. - type ModuleToIndex = ModuleToIndex; + type PalletInfo = PalletInfo; /// What to do if a new account is created. type OnNewAccount = (); /// What to do if an account is fully reaped from the system. @@ -211,6 +211,8 @@ impl pallet_grandpa::Trait for Runtime { )>>::IdentificationTuple; type HandleEquivocation = (); + + type WeightInfo = (); } parameter_types! { @@ -227,9 +229,11 @@ impl pallet_timestamp::Trait for Runtime { parameter_types! { pub const ExistentialDeposit: u128 = 500; + pub const MaxLocks: u32 = 50; } impl pallet_balances::Trait for Runtime { + type MaxLocks = MaxLocks; /// The type for recording an account's balance. type Balance = Balance; /// The ubiquitous event type. @@ -423,7 +427,7 @@ impl_runtime_apis! { None } } - + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { fn account_nonce(account: AccountId) -> Index { System::account_nonce(account) @@ -438,4 +442,39 @@ impl_runtime_apis! 
{ TransactionPayment::query_info(uxt, len) } } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark<Block> for Runtime { + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> { + use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; + + use frame_system_benchmarking::Module as SystemBench; + impl frame_system_benchmarking::Trait for Runtime {} + + let whitelist: Vec<TrackedStorageKey> = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; + + let mut batches = Vec::<BenchmarkBatch>::new(); + let params = (&config, &whitelist); + + add_benchmark!(params, batches, frame_system, SystemBench::<Runtime>); + add_benchmark!(params, batches, pallet_balances, Balances); + add_benchmark!(params, batches, pallet_timestamp, Timestamp); + + if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } + Ok(batches) + } + } } diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 0f93039c3c1eb48e26f7cfbb95f1296a69f3f77c..c0f5bf889521ca1356aea4e233199602e44f5bbc 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-bench" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies <admin@parity.io>"] description = "Substrate node integration benchmarks." 
edition = "2018" @@ -10,27 +10,28 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] log = "0.4.8" -node-primitives = { version = "2.0.0-rc5", path = "../primitives" } -node-testing = { version = "2.0.0-rc5", path = "../testing" } -node-runtime = { version = "2.0.0-rc5", path = "../runtime" } -sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli" } -sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api/" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-machine" } +node-primitives = { version = "2.0.0", path = "../primitives" } +node-testing = { version = "2.0.0", path = "../testing" } +node-runtime = { version = "2.0.0", path = "../runtime" } +sc-cli = { version = "0.8.0", path = "../../../client/cli" } +sc-client-api = { version = "2.0.0", path = "../../../client/api/" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } serde = "1.0.101" serde_json = "1.0.41" structopt = "0.3" derive_more = "0.99.2" kvdb = "0.7" -kvdb-rocksdb = "0.9" -sp-trie = { version = "2.0.0-rc5", path = "../../../primitives/trie" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../primitives/transaction-pool" } -sc-basic-authorship = { version = "0.8.0-rc5", path = "../../../client/basic-authorship" } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } -sp-finality-tracker = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/finality-tracker" } -sp-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/timestamp" } +kvdb-rocksdb = "0.9.1" +sp-trie = { version = "2.0.0", path = "../../../primitives/trie" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } +sc-basic-authorship = { version = "0.8.0", path = "../../../client/basic-authorship" } +sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } +sp-finality-tracker = { version = "2.0.0", default-features = false, path = "../../../primitives/finality-tracker" } +sp-timestamp = { version = "2.0.0", default-features = false, path = "../../../primitives/timestamp" } +sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } hash-db = "0.15.2" tempfile = "3.1.0" fs_extra = "1" @@ -39,5 +40,5 @@ rand = { version = "0.7.2", features = ["small_rng"] } lazy_static = "1.4.0" parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } parity-db = { version = "0.1.2" } -sc-transaction-pool = { version = "2.0.0-rc5", path = "../../../client/transaction-pool" } +sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } futures = { version = "0.3.4", features = ["thread-pool"] } diff --git a/bin/node/bench/src/main.rs b/bin/node/bench/src/main.rs index 96ef1d920c1f551cb0a467814599f0e9afbf5ce8..46b659dd88387e94c2d493e3d5975df3e7c28fab 100644 --- a/bin/node/bench/src/main.rs +++ b/bin/node/bench/src/main.rs @@ -79,7 +79,7 @@ fn main() { let opt = 
Opt::from_args(); if !opt.json { - sc_cli::init_logger(""); + sp_tracing::try_init_simple(); } let mut import_benchmarks = Vec::new(); diff --git a/bin/node/browser-testing/Cargo.toml b/bin/node/browser-testing/Cargo.toml index 1f5db1053d741a8b9f8b67311514a20355100198..13d6e057a1e16d418278ea042d9555eb582fe5d3 100644 --- a/bin/node/browser-testing/Cargo.toml +++ b/bin/node/browser-testing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-browser-testing" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] description = "Tests for the in-browser light client." edition = "2018" @@ -8,8 +8,8 @@ license = "Apache-2.0" [dependencies] futures-timer = "3.0.2" -libp2p = { version = "0.23.0", default-features = false } -jsonrpc-core = "14.2.0" +libp2p = { version = "0.28.1", default-features = false } +jsonrpc-core = "15.0.0" serde = "1.0.106" serde_json = "1.0.48" wasm-bindgen = { version = "=0.2.67", features = ["serde-serialize"] } @@ -17,5 +17,5 @@ wasm-bindgen-futures = "0.4.10" wasm-bindgen-test = "0.3.10" futures = "0.3.4" -node-cli = { path = "../cli", default-features = false, features = ["browser"] , version = "2.0.0-rc5"} -sc-rpc-api = { path = "../../../client/rpc-api" , version = "0.8.0-rc5"} +node-cli = { path = "../cli", default-features = false, features = ["browser"] , version = "2.0.0"} +sc-rpc-api = { path = "../../../client/rpc-api" , version = "0.8.0"} diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 16ab9bbe8064149ce30b966d4ece9e470ba87c3e..39df211707eaa5743ffe82a7cea23329814b2dc5 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-cli" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] description = "Generic Substrate node implementation in Rust." 
build = "build.rs" @@ -37,87 +37,85 @@ crate-type = ["cdylib", "rlib"] codec = { package = "parity-scale-codec", version = "1.3.4" } serde = { version = "1.0.102", features = ["derive"] } futures = { version = "0.3.1", features = ["compat"] } -hex-literal = "0.2.1" -jsonrpc-core = "14.2.0" -jsonrpc-pubsub = "14.2.0" +hex-literal = "0.3.1" log = "0.4.8" rand = "0.7.2" structopt = { version = "0.3.8", optional = true } -tracing = "0.1.18" +tracing = "0.1.19" parking_lot = "0.10.0" # primitives -sp-authority-discovery = { version = "2.0.0-rc5", path = "../../../primitives/authority-discovery" } -sp-consensus-babe = { version = "0.8.0-rc5", path = "../../../primitives/consensus/babe" } -grandpa-primitives = { version = "2.0.0-rc5", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/timestamp" } -sp-finality-tracker = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/finality-tracker" } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } -sp-keyring = { version = "2.0.0-rc5", path = "../../../primitives/keyring" } -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../primitives/transaction-pool" } +sp-authority-discovery = { version = "2.0.0", path = "../../../primitives/authority-discovery" } +sp-consensus-babe = { version = "0.8.0", path = "../../../primitives/consensus/babe" } +grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-timestamp = { version = "2.0.0", default-features = false, path = "../../../primitives/timestamp" } +sp-finality-tracker = { version = "2.0.0", default-features = false, path = "../../../primitives/finality-tracker" } +sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } +sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } +sp-io = { version = "2.0.0", path = "../../../primitives/io" } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } # client dependencies -sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api" } -sc-chain-spec = { version = "2.0.0-rc5", path = "../../../client/chain-spec" } -sc-consensus = { version = "0.8.0-rc5", path = "../../../client/consensus/common" } -sc-transaction-pool = { version = "2.0.0-rc5", path = "../../../client/transaction-pool" } -sc-network = { version = "0.8.0-rc5", path = "../../../client/network" } -sc-consensus-babe = { version = "0.8.0-rc5", path = "../../../client/consensus/babe" } -grandpa = { version = "0.8.0-rc5", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } -sc-client-db = { version = "0.8.0-rc5", default-features = false, path = "../../../client/db" } -sc-offchain = { version = "2.0.0-rc5", path = "../../../client/offchain" } -sc-rpc = { version = "2.0.0-rc5", path = "../../../client/rpc" } 
-sc-basic-authorship = { version = "0.8.0-rc5", path = "../../../client/basic-authorship" } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../../client/service" } -sc-tracing = { version = "2.0.0-rc5", path = "../../../client/tracing" } -sc-telemetry = { version = "2.0.0-rc5", path = "../../../client/telemetry" } -sc-authority-discovery = { version = "0.8.0-rc5", path = "../../../client/authority-discovery" } +sc-client-api = { version = "2.0.0", path = "../../../client/api" } +sc-chain-spec = { version = "2.0.0", path = "../../../client/chain-spec" } +sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } +sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } +sc-network = { version = "0.8.0", path = "../../../client/network" } +sc-consensus-babe = { version = "0.8.0", path = "../../../client/consensus/babe" } +grandpa = { version = "0.8.0", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } +sc-client-db = { version = "0.8.0", default-features = false, path = "../../../client/db" } +sc-offchain = { version = "2.0.0", path = "../../../client/offchain" } +sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } +sc-basic-authorship = { version = "0.8.0", path = "../../../client/basic-authorship" } +sc-service = { version = "0.8.0", default-features = false, path = "../../../client/service" } +sc-tracing = { version = "2.0.0", path = "../../../client/tracing" } +sc-telemetry = { version = "2.0.0", path = "../../../client/telemetry" } +sc-authority-discovery = { version = "0.8.0", path = "../../../client/authority-discovery" } # frame dependencies -pallet-indices = { version = "2.0.0-rc5", path = "../../../frame/indices" } -pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/timestamp" } -pallet-contracts = { version = "2.0.0-rc5", path = "../../../frame/contracts" } -frame-system = { version = "2.0.0-rc5", path = "../../../frame/system" } -pallet-balances = { version = "2.0.0-rc5", path = "../../../frame/balances" } -pallet-transaction-payment = { version = "2.0.0-rc5", path = "../../../frame/transaction-payment" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/support" } -pallet-im-online = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/im-online" } -pallet-authority-discovery = { version = "2.0.0-rc5", path = "../../../frame/authority-discovery" } -pallet-staking = { version = "2.0.0-rc5", path = "../../../frame/staking" } -pallet-grandpa = { version = "2.0.0-rc5", path = "../../../frame/grandpa" } +pallet-indices = { version = "2.0.0", path = "../../../frame/indices" } +pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../../frame/timestamp" } +pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } +frame-system = { version = "2.0.0", path = "../../../frame/system" } +pallet-balances = { version = "2.0.0", path = "../../../frame/balances" } +pallet-transaction-payment = { version = "2.0.0", path = "../../../frame/transaction-payment" } +frame-support = { version = "2.0.0", default-features = false, path = "../../../frame/support" } +pallet-im-online = { version = "2.0.0", default-features = false, path = "../../../frame/im-online" } +pallet-authority-discovery = { version = "2.0.0", path = "../../../frame/authority-discovery" } +pallet-staking = { version = "2.0.0", path = "../../../frame/staking" } +pallet-grandpa 
= { version = "2.0.0", path = "../../../frame/grandpa" } # node-specific dependencies -node-runtime = { version = "2.0.0-rc5", path = "../runtime" } -node-rpc = { version = "2.0.0-rc5", path = "../rpc" } -node-primitives = { version = "2.0.0-rc5", path = "../primitives" } -node-executor = { version = "2.0.0-rc5", path = "../executor" } +node-runtime = { version = "2.0.0", path = "../runtime" } +node-rpc = { version = "2.0.0", path = "../rpc" } +node-primitives = { version = "2.0.0", path = "../primitives" } +node-executor = { version = "2.0.0", path = "../executor" } # CLI-specific dependencies -sc-cli = { version = "0.8.0-rc5", optional = true, path = "../../../client/cli" } -frame-benchmarking-cli = { version = "2.0.0-rc5", optional = true, path = "../../../utils/frame/benchmarking-cli" } -node-inspect = { version = "0.8.0-rc5", optional = true, path = "../inspect" } +sc-cli = { version = "0.8.0", optional = true, path = "../../../client/cli" } +frame-benchmarking-cli = { version = "2.0.0", optional = true, path = "../../../utils/frame/benchmarking-cli" } +node-inspect = { version = "0.8.0", optional = true, path = "../inspect" } # WASM-specific dependencies wasm-bindgen = { version = "0.2.57", optional = true } wasm-bindgen-futures = { version = "0.4.7", optional = true } -browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.8.0-rc5"} +browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.8.0"} [target.'cfg(target_arch="x86_64")'.dependencies] -node-executor = { version = "2.0.0-rc5", path = "../executor", features = [ "wasmtime" ] } -sc-cli = { version = "0.8.0-rc5", optional = true, path = "../../../client/cli", features = [ "wasmtime" ] } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../../client/service", features = [ "wasmtime" ] } -sp-trie = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/trie", features = ["memory-tracker"] } +node-executor = { version = "2.0.0", path = "../executor", features = [ "wasmtime" ] } +sc-cli = { version = "0.8.0", optional = true, path = "../../../client/cli", features = [ "wasmtime" ] } +sc-service = { version = "0.8.0", default-features = false, path = "../../../client/service", features = [ "wasmtime" ] } +sp-trie = { version = "2.0.0", default-features = false, path = "../../../primitives/trie", features = ["memory-tracker"] } [dev-dependencies] -sc-keystore = { version = "2.0.0-rc5", path = "../../../client/keystore" } -sc-consensus = { version = "0.8.0-rc5", path = "../../../client/consensus/common" } -sc-consensus-babe = { version = "0.8.0-rc5", features = ["test-helpers"], path = "../../../client/consensus/babe" } -sc-consensus-epochs = { version = "0.8.0-rc5", path = "../../../client/consensus/epochs" } -sc-service-test = { version = "2.0.0-rc5", path = "../../../client/service/test" } +sc-keystore = { version = "2.0.0", path = "../../../client/keystore" } +sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } +sc-consensus-babe = { version = "0.8.0", features = ["test-helpers"], path = "../../../client/consensus/babe" } +sc-consensus-epochs = { version = "0.8.0", path = "../../../client/consensus/epochs" } +sc-service-test = { version = "2.0.0", path = "../../../client/service/test" } futures = "0.3.4" tempfile = "3.1.0" assert_cmd = "1.0" @@ -128,12 +126,13 @@ platforms = "0.2.1" [build-dependencies] structopt = { version = 
"0.3.8", optional = true } -node-inspect = { version = "0.8.0-rc5", optional = true, path = "../inspect" } -frame-benchmarking-cli = { version = "2.0.0-rc5", optional = true, path = "../../../utils/frame/benchmarking-cli" } -substrate-build-script-utils = { version = "2.0.0-rc5", optional = true, path = "../../../utils/build-script-utils" } +node-inspect = { version = "0.8.0", optional = true, path = "../inspect" } +frame-benchmarking-cli = { version = "2.0.0", optional = true, path = "../../../utils/frame/benchmarking-cli" } +substrate-build-script-utils = { version = "2.0.0", optional = true, path = "../../../utils/build-script-utils" } +substrate-frame-cli = { version = "2.0.0", optional = true, path = "../../../utils/frame/frame-utilities-cli" } [build-dependencies.sc-cli] -version = "0.8.0-rc5" +version = "0.8.0" package = "sc-cli" path = "../../../client/cli" optional = true @@ -150,6 +149,7 @@ cli = [ "node-inspect", "sc-cli", "frame-benchmarking-cli", + "substrate-frame-cli", "sc-service/db", "structopt", "substrate-build-script-utils", diff --git a/bin/node/cli/browser-demo/build.sh b/bin/node/cli/browser-demo/build.sh index ea0380b760e31f4037d49509c7852b02baf81501..be52b7a523f0177728181bfa18b8aef614185a26 100755 --- a/bin/node/cli/browser-demo/build.sh +++ b/bin/node/cli/browser-demo/build.sh @@ -1,4 +1,4 @@ #!/usr/bin/env sh cargo +nightly build --release -p node-cli --target wasm32-unknown-unknown --no-default-features --features browser -Z features=itarget wasm-bindgen ../../../../target/wasm32-unknown-unknown/release/node_cli.wasm --out-dir pkg --target web -python -m SimpleHTTPServer 8000 +python -m http.server 8000 diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index e323f7956f169f7523186566dcc0524ed313d347..90824a5572f12f9e50f19703e96f2a9c046c5e40 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -380,7 +380,7 @@ pub fn local_testnet_config() -> ChainSpec { #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::service::{new_full_base, new_light_base}; + use crate::service::{new_full_base, new_light_base, NewFullBase}; use sc_service_test; use sp_runtime::BuildStorage; @@ -431,8 +431,9 @@ pub(crate) mod tests { sc_service_test::connectivity( integration_test_config_with_two_authorities(), |config| { - let (keep_alive, _, client, network, transaction_pool) = new_full_base(config,|_, _| ())?; - Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) + let NewFullBase { task_manager, client, network, transaction_pool, .. } + = new_full_base(config,|_, _| ())?; + Ok(sc_service_test::TestNetComponents::new(task_manager, client, network, transaction_pool)) }, |config| { let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; diff --git a/bin/node/cli/src/cli.rs b/bin/node/cli/src/cli.rs index 29e916fe0180ec2c8ffde4cacdfba361b24cf86c..6e51dae93793f4ad862f4b1d20bdf1f7fb698103 100644 --- a/bin/node/cli/src/cli.rs +++ b/bin/node/cli/src/cli.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sc_cli::RunCmd; +use sc_cli::{RunCmd, KeySubcommand, SignCmd, VanityCmd, VerifyCmd}; use structopt::StructOpt; /// An overarching CLI command definition. @@ -33,9 +33,8 @@ pub struct Cli { /// Possible subcommands of the main binary. #[derive(Debug, StructOpt)] pub enum Subcommand { - /// A set of base subcommands handled by `sc_cli`. 
- #[structopt(flatten)] - Base(sc_cli::Subcommand), + /// Key management cli utilities + Key(KeySubcommand), /// The custom inspect subcommand for decoding blocks and extrinsics. #[structopt( @@ -47,4 +46,37 @@ pub enum Subcommand { /// The custom benchmark subcommand benchmarking runtime pallets. #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] Benchmark(frame_benchmarking_cli::BenchmarkCmd), + + /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. + Verify(VerifyCmd), + + /// Generate a seed that provides a vanity address. + Vanity(VanityCmd), + + /// Sign a message, with a given (secret) key. + Sign(SignCmd), + + /// Build a chain specification. + BuildSpec(sc_cli::BuildSpecCmd), + + /// Build a chain specification with a light client sync state. + BuildSyncSpec(sc_cli::BuildSyncSpecCmd), + + /// Validate blocks. + CheckBlock(sc_cli::CheckBlockCmd), + + /// Export blocks. + ExportBlocks(sc_cli::ExportBlocksCmd), + + /// Export the state of a given block into a chain spec. + ExportState(sc_cli::ExportStateCmd), + + /// Import blocks. + ImportBlocks(sc_cli::ImportBlocksCmd), + + /// Remove the whole chain. + PurgeChain(sc_cli::PurgeChainCmd), + + /// Revert the chain to a previous state. + Revert(sc_cli::RevertCmd), } diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 69d9a029865783f897d7a47b5a0700a0b88b2f8f..7b84ff5a0583bc094133316e9c9e32302b340cc3 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -21,7 +21,7 @@ use node_executor::Executor; use node_runtime::{Block, RuntimeApi}; use sc_cli::{Result, SubstrateCli, RuntimeVersion, Role, ChainSpec}; use sc_service::PartialComponents; -use crate::service::new_partial; +use crate::service::{new_partial, new_full_base, NewFullBase}; impl SubstrateCli for Cli { fn impl_name() -> String { @@ -88,18 +88,72 @@ pub fn run() -> Result<()> { runner.sync_run(|config| cmd.run::<Block, Executor>(config)) } else { - println!("Benchmarking wasn't enabled when building the node. \ - You can enable it with `--features runtime-benchmarks`."); - Ok(()) + Err("Benchmarking wasn't enabled when building the node. \ + You can enable it with `--features runtime-benchmarks`.".into()) } } - Some(Subcommand::Base(subcommand)) => { - let runner = cli.create_runner(subcommand)?; - runner.run_subcommand(subcommand, |config| { - let PartialComponents { client, backend, task_manager, import_queue, ..} + Some(Subcommand::Key(cmd)) => cmd.run(), + Some(Subcommand::Sign(cmd)) => cmd.run(), + Some(Subcommand::Verify(cmd)) => cmd.run(), + Some(Subcommand::Vanity(cmd)) => cmd.run(), + Some(Subcommand::BuildSpec(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) + }, + Some(Subcommand::BuildSyncSpec(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let chain_spec = config.chain_spec.cloned_box(); + let network_config = config.network.clone(); + let NewFullBase { task_manager, client, network_status_sinks, .. 
} + = new_full_base(config, |_, _| ())?; + + Ok((cmd.run(chain_spec, network_config, client, network_status_sinks), task_manager)) + }) + }, + Some(Subcommand::CheckBlock(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, import_queue, ..} = new_partial(&config)?; - Ok((client, backend, import_queue, task_manager)) + Ok((cmd.run(client, import_queue), task_manager)) }) - } + }, + Some(Subcommand::ExportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, ..} + = new_partial(&config)?; + Ok((cmd.run(client, config.database), task_manager)) + }) + }, + Some(Subcommand::ExportState(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, ..} + = new_partial(&config)?; + Ok((cmd.run(client, config.chain_spec), task_manager)) + }) + }, + Some(Subcommand::ImportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, import_queue, ..} + = new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + }, + Some(Subcommand::PurgeChain(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.database)) + }, + Some(Subcommand::Revert(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, backend, ..} + = new_partial(&config)?; + Ok((cmd.run(client, backend), task_manager)) + }) + }, } } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index f9ff096ad4b545c19a3668e301645aec07356032..b15ace6181a8fa0321f5c206fdc6abad7ecd6cbd 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -51,14 +51,17 @@ pub fn new_partial(config: &Configuration) -> Result node_rpc::IoHandler, ( sc_consensus_babe::BabeBlockImport, grandpa::LinkHalf, sc_consensus_babe::BabeLink, ), - grandpa::SharedVoterState, + ( + grandpa::SharedVoterState, + Arc>, + ), ) >, ServiceError> { let (client, backend, keystore, task_manager) = @@ -97,6 +100,7 @@ pub fn new_partial(config: &Configuration) -> Result Result Result Result Result, + pub network: Arc::Hash>>, + pub network_status_sinks: sc_service::NetworkStatusSinks, + pub transaction_pool: Arc>, +} + /// Creates a full service from the configuration. 
pub fn new_full_base( config: Configuration, @@ -157,19 +173,14 @@ pub fn new_full_base( &sc_consensus_babe::BabeBlockImport, &sc_consensus_babe::BabeLink, ) -) -> Result<( - TaskManager, InherentDataProviders, Arc, - Arc::Hash>>, - Arc>, -), ServiceError> { +) -> Result { let sc_service::PartialComponents { client, backend, mut task_manager, import_queue, keystore, select_chain, transaction_pool, inherent_data_providers, other: (rpc_extensions_builder, import_setup, rpc_setup), } = new_partial(&config)?; - let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); + let (shared_voter_state, finality_proof_provider) = rpc_setup; let (network, network_status_sinks, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { @@ -209,12 +220,11 @@ pub fn new_full_base( on_demand: None, remote_blockchain: None, telemetry_connection_sinks: telemetry_connection_sinks.clone(), - network_status_sinks, + network_status_sinks: network_status_sinks.clone(), system_rpc_tx, })?; let (block_import, grandpa_link, babe_link) = import_setup; - let shared_voter_state = rpc_setup; (with_startup_data)(&block_import, &babe_link); @@ -329,13 +339,16 @@ pub fn new_full_base( } network_starter.start_network(); - Ok((task_manager, inherent_data_providers, client, network, transaction_pool)) + Ok(NewFullBase { + task_manager, inherent_data_providers, client, network, network_status_sinks, + transaction_pool, + }) } /// Builds a new service for a full client. pub fn new_full(config: Configuration) -> Result { - new_full_base(config, |_, _| ()).map(|(task_manager, _, _, _, _)| { + new_full_base(config, |_, _| ()).map(|NewFullBase { task_manager, .. }| { task_manager }) } @@ -385,6 +398,7 @@ pub fn new_light_base(config: Configuration) -> Result<( inherent_data_providers.clone(), &task_manager.spawn_handle(), config.prometheus_registry(), + sp_consensus::NeverCanAuthor, )?; let finality_proof_provider = @@ -465,7 +479,7 @@ mod tests { use sp_finality_tracker; use sp_keyring::AccountKeyring; use sc_service_test::TestNetNode; - use crate::service::{new_full_base, new_light_base}; + use crate::service::{new_full_base, new_light_base, NewFullBase}; use sp_runtime::traits::IdentifyAccount; use sp_transaction_pool::{MaintainedTransactionPool, ChainEvent}; use sc_client_api::BlockBackend; @@ -497,18 +511,19 @@ mod tests { chain_spec, |config| { let mut setup_handles = None; - let (keep_alive, inherent_data_providers, client, network, transaction_pool) = - new_full_base(config, - | - block_import: &sc_consensus_babe::BabeBlockImport, - babe_link: &sc_consensus_babe::BabeLink, - | { - setup_handles = Some((block_import.clone(), babe_link.clone())); - } - )?; + let NewFullBase { + task_manager, inherent_data_providers, client, network, transaction_pool, .. 
+ } = new_full_base(config, + | + block_import: &sc_consensus_babe::BabeBlockImport, + babe_link: &sc_consensus_babe::BabeLink, + | { + setup_handles = Some((block_import.clone(), babe_link.clone())); + } + )?; let node = sc_service_test::TestNetComponents::new( - keep_alive, client, network, transaction_pool + task_manager, client, network, transaction_pool ); Ok((node, (inherent_data_providers, setup_handles.unwrap()))) }, @@ -659,8 +674,9 @@ mod tests { sc_service_test::consensus( crate::chain_spec::tests::integration_test_config_with_two_authorities(), |config| { - let (keep_alive, _, client, network, transaction_pool) = new_full_base(config, |_, _| ())?; - Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) + let NewFullBase { task_manager, client, network, transaction_pool, .. } + = new_full_base(config,|_, _| ())?; + Ok(sc_service_test::TestNetComponents::new(task_manager, client, network, transaction_pool)) }, |config| { let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index 6c6920d62be54cd8ea6df35ca9c7e78f43cf7e6e..70cf3c1fd6571c32f80cb38d73b6d7bbf62ddfe1 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-executor" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] description = "Substrate node implementation in Rust." edition = "2018" @@ -13,35 +13,35 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4" } -node-primitives = { version = "2.0.0-rc5", path = "../primitives" } -node-runtime = { version = "2.0.0-rc5", path = "../runtime" } -sc-executor = { version = "0.8.0-rc5", path = "../../../client/executor" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-machine" } -sp-trie = { version = "2.0.0-rc5", path = "../../../primitives/trie" } +node-primitives = { version = "2.0.0", path = "../primitives" } +node-runtime = { version = "2.0.0", path = "../runtime" } +sc-executor = { version = "0.8.0", path = "../../../client/executor" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-io = { version = "2.0.0", path = "../../../primitives/io" } +sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } +sp-trie = { version = "2.0.0", path = "../../../primitives/trie" } trie-root = "0.16.0" -frame-benchmarking = { version = "2.0.0-rc5", path = "../../../frame/benchmarking" } +frame-benchmarking = { version = "2.0.0", path = "../../../frame/benchmarking" } [dev-dependencies] criterion = "0.3.0" -frame-support = { version = "2.0.0-rc5", path = "../../../frame/support" } -frame-system = { version = "2.0.0-rc5", path = "../../../frame/system" } -node-testing = { version = "2.0.0-rc5", path = "../testing" } -pallet-balances = { version = "2.0.0-rc5", path = "../../../frame/balances" } -pallet-contracts = { version = "2.0.0-rc5", path = "../../../frame/contracts" } -pallet-grandpa = { version = "2.0.0-rc5", path = "../../../frame/grandpa" } -pallet-im-online = { version = "2.0.0-rc5", path = "../../../frame/im-online" } -pallet-indices = { version = "2.0.0-rc5", path = "../../../frame/indices" } -pallet-session = { version = "2.0.0-rc5", path = "../../../frame/session" } 
-pallet-timestamp = { version = "2.0.0-rc5", path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0-rc5", path = "../../../frame/transaction-payment" } -pallet-treasury = { version = "2.0.0-rc5", path = "../../../frame/treasury" } -sp-application-crypto = { version = "2.0.0-rc5", path = "../../../primitives/application-crypto" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-externalities = { version = "0.8.0-rc5", path = "../../../primitives/externalities" } -substrate-test-client = { version = "2.0.0-rc5", path = "../../../test-utils/client" } -wabt = "0.9.2" +frame-support = { version = "2.0.0", path = "../../../frame/support" } +frame-system = { version = "2.0.0", path = "../../../frame/system" } +node-testing = { version = "2.0.0", path = "../testing" } +pallet-balances = { version = "2.0.0", path = "../../../frame/balances" } +pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } +pallet-grandpa = { version = "2.0.0", path = "../../../frame/grandpa" } +pallet-im-online = { version = "2.0.0", path = "../../../frame/im-online" } +pallet-indices = { version = "2.0.0", path = "../../../frame/indices" } +pallet-session = { version = "2.0.0", path = "../../../frame/session" } +pallet-timestamp = { version = "2.0.0", path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "2.0.0", path = "../../../frame/transaction-payment" } +pallet-treasury = { version = "2.0.0", path = "../../../frame/treasury" } +sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-externalities = { version = "0.8.0", path = "../../../primitives/externalities" } +substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" } +wat = "1.0" [features] wasmtime = [ diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index e7744200bccd64fb7bcf44d595e918a7d6c8eab8..723e3a7e4ba62fac313614d0dd2ee400cc9ad526 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -36,7 +36,7 @@ use node_runtime::{ constants::currency::*, }; use node_primitives::{Balance, Hash}; -use wabt; +use wat; use node_testing::keyring::*; pub mod common; @@ -164,7 +164,7 @@ fn panic_execution_with_foreign_code_gives_error() { let mut t = new_test_ext(bloaty_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (69u128, 0u8, 0u128, 0u128, 0u128).encode() + (69u128, 0u32, 0u128, 0u128, 0u128).encode() ); t.insert(>::hashed_key().to_vec(), 69_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -193,7 +193,7 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { let mut t = new_test_ext(compact_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u8, 69u128, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 69u128, 0u128, 0u128, 0u128).encode() ); t.insert(>::hashed_key().to_vec(), 69_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -222,11 +222,11 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { let mut t = new_test_ext(compact_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u8, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() ); t.insert( >::hashed_key_for(bob()), - (0u32, 0u8, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() ); t.insert( 
>::hashed_key().to_vec(), @@ -265,11 +265,11 @@ fn successful_execution_with_foreign_code_gives_ok() { let mut t = new_test_ext(bloaty_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u8, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() ); t.insert( >::hashed_key_for(bob()), - (0u32, 0u8, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() ); t.insert( >::hashed_key().to_vec(), @@ -580,7 +580,7 @@ const CODE_TRANSFER: &str = r#" #[test] fn deploying_wasm_contract_should_work() { - let transfer_code = wabt::wat2wasm(CODE_TRANSFER).unwrap(); + let transfer_code = wat::parse_str(CODE_TRANSFER).unwrap(); let transfer_ch = ::Hashing::hash(&transfer_code); let addr = ::DetermineContractAddress::contract_address_for( @@ -702,7 +702,7 @@ fn panic_execution_gives_error() { let mut t = new_test_ext(bloaty_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u8, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() ); t.insert(>::hashed_key().to_vec(), 0_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -731,11 +731,11 @@ fn successful_execution_gives_ok() { let mut t = new_test_ext(compact_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u8, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() ); t.insert( >::hashed_key_for(bob()), - (0u32, 0u8, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() ); t.insert( >::hashed_key().to_vec(), diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index b39cf344e6034af0608f848b5ae87722bee5fc28..d04af1d827009f51340daeacc9e055ab6ec5e00e 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -133,11 +133,11 @@ fn transaction_fee_is_correct() { let mut t = new_test_ext(compact_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u8, 100 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() + (0u32, 0u32, 100 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() ); t.insert( >::hashed_key_for(bob()), - (0u32, 0u8, 10 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() + (0u32, 0u32, 10 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() ); t.insert( >::hashed_key().to_vec(), diff --git a/bin/node/executor/tests/submit_transaction.rs b/bin/node/executor/tests/submit_transaction.rs index 64c2deedac788ea8681ea54c93ec09125d8e1684..af74c0c0b7c299d521d519230f215b9dd5ac031f 100644 --- a/bin/node/executor/tests/submit_transaction.rs +++ b/bin/node/executor/tests/submit_transaction.rs @@ -224,7 +224,7 @@ fn submitted_transaction_should_be_valid() { let author = extrinsic.signature.clone().unwrap().0; let address = Indices::lookup(author).unwrap(); let data = pallet_balances::AccountData { free: 5_000_000_000_000, ..Default::default() }; - let account = frame_system::AccountInfo { nonce: 0u32, refcount: 0u8, data }; + let account = frame_system::AccountInfo { nonce: 0, refcount: 0, data }; >::insert(&address, account); // check validity diff --git a/bin/node/inspect/Cargo.toml b/bin/node/inspect/Cargo.toml index b7f828a5f1e569ae31ecf20e5e6170fe50a023fb..3686ddf27669bfa10a54eab84437c7ae813a4b96 100644 --- a/bin/node/inspect/Cargo.toml +++ b/bin/node/inspect/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-inspect" -version = "0.8.0-rc5" +version = 
"0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,10 +14,10 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "1.3.4" } derive_more = "0.99" log = "0.4.8" -sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli" } -sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api" } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../../client/service" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } +sc-cli = { version = "0.8.0", path = "../../../client/cli" } +sc-client-api = { version = "2.0.0", path = "../../../client/api" } +sc-service = { version = "0.8.0", default-features = false, path = "../../../client/service" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } structopt = "0.3.8" diff --git a/bin/node/primitives/Cargo.toml b/bin/node/primitives/Cargo.toml index 6ff8a05d614326907e98d971e9c9590a4e4dfc09..305764970c149eb54b36939fa6b57e4d59e505a6 100644 --- a/bin/node/primitives/Cargo.toml +++ b/bin/node/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-primitives" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,13 +12,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system" } -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/application-crypto" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } +frame-system = { version = "2.0.0", default-features = false, path = "../../../frame/system" } +sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../../primitives/application-crypto" } +sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } [dev-dependencies] -sp-serializer = { version = "2.0.0-rc5", path = "../../../primitives/serializer" } +sp-serializer = { version = "2.0.0", path = "../../../primitives/serializer" } pretty_assertions = "0.6.1" [features] diff --git a/bin/node/rpc-client/Cargo.toml b/bin/node/rpc-client/Cargo.toml index d1a76f2ab3754425ebde98508d18745f7155ef55..9f358e901dafa1917c142b4533bbe49436d6537a 100644 --- a/bin/node/rpc-client/Cargo.toml +++ b/bin/node/rpc-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-rpc-client" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -11,10 +11,10 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -env_logger = "0.7.0" futures = "0.1.29" hyper = "0.12.35" -jsonrpc-core-client = { version = "14.2.0", default-features = false, 
features = ["http"] } +jsonrpc-core-client = { version = "15.0.0", default-features = false, features = ["http"] } log = "0.4.8" -node-primitives = { version = "2.0.0-rc5", path = "../primitives" } -sc-rpc = { version = "2.0.0-rc5", path = "../../../client/rpc" } +node-primitives = { version = "2.0.0", path = "../primitives" } +sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } +sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } diff --git a/bin/node/rpc-client/src/main.rs b/bin/node/rpc-client/src/main.rs index eadd1c8d472476b6db57570d08a0233d36bea45d..31f1efa28ccd0684c363f577d491ce6e98b34e7f 100644 --- a/bin/node/rpc-client/src/main.rs +++ b/bin/node/rpc-client/src/main.rs @@ -35,7 +35,7 @@ use jsonrpc_core_client::{ }; fn main() { - env_logger::init(); + sp_tracing::try_init_simple(); rt::run(rt::lazy(|| { let uri = "http://localhost:9933"; diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index ee11ff4ac8c4956ec6326148394b5acdae9b1e9f..d80b686b5ac932b4e02230d54892921c2e36aba6 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-rpc" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -11,25 +11,25 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpc-core = "14.2.0" -jsonrpc-pubsub = "14.2.0" -node-primitives = { version = "2.0.0-rc5", path = "../primitives" } -node-runtime = { version = "2.0.0-rc5", path = "../runtime" } -pallet-contracts-rpc = { version = "0.8.0-rc5", path = "../../../frame/contracts/rpc/" } -pallet-transaction-payment-rpc = { version = "2.0.0-rc5", path = "../../../frame/transaction-payment/rpc/" } -sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api" } -sc-consensus-babe = { version = "0.8.0-rc5", path = "../../../client/consensus/babe" } -sc-consensus-babe-rpc = { version = "0.8.0-rc5", path = "../../../client/consensus/babe/rpc" } -sc-consensus-epochs = { version = "0.8.0-rc5", path = "../../../client/consensus/epochs" } -sc-finality-grandpa = { version = "0.8.0-rc5", path = "../../../client/finality-grandpa" } -sc-finality-grandpa-rpc = { version = "0.8.0-rc5", path = "../../../client/finality-grandpa/rpc" } -sc-keystore = { version = "2.0.0-rc5", path = "../../../client/keystore" } -sc-rpc-api = { version = "0.8.0-rc5", path = "../../../client/rpc-api" } -sc-rpc = { version = "2.0.0-rc5", path = "../../../client/rpc" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../../primitives/block-builder" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-consensus-babe = { version = "0.8.0-rc5", path = "../../../primitives/consensus/babe" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../primitives/transaction-pool" } -substrate-frame-rpc-system = { version = "2.0.0-rc5", path = "../../../utils/frame/rpc/system" } +jsonrpc-core = "15.0.0" +node-primitives = { version = "2.0.0", path = "../primitives" } +node-runtime = { version = "2.0.0", path = "../runtime" } +pallet-contracts-rpc = { version = "0.8.0", path = "../../../frame/contracts/rpc/" } +pallet-transaction-payment-rpc = { version = "2.0.0", path = "../../../frame/transaction-payment/rpc/" } +sc-client-api = { version = "2.0.0", path = 
"../../../client/api" } +sc-consensus-babe = { version = "0.8.0", path = "../../../client/consensus/babe" } +sc-consensus-babe-rpc = { version = "0.8.0", path = "../../../client/consensus/babe/rpc" } +sc-consensus-epochs = { version = "0.8.0", path = "../../../client/consensus/epochs" } +sc-finality-grandpa = { version = "0.8.0", path = "../../../client/finality-grandpa" } +sc-finality-grandpa-rpc = { version = "0.8.0", path = "../../../client/finality-grandpa/rpc" } +sc-keystore = { version = "2.0.0", path = "../../../client/keystore" } +sc-rpc-api = { version = "0.8.0", path = "../../../client/rpc-api" } +sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } +sp-api = { version = "2.0.0", path = "../../../primitives/api" } +sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sp-consensus-babe = { version = "0.8.0", path = "../../../primitives/consensus/babe" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } +substrate-frame-rpc-system = { version = "2.0.0", path = "../../../utils/frame/rpc/system" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index a20fb03ebe1e6be0d6c663885971336ce6d8c0f9..55a7adf612119a28d727f9c62e240a9beaed965c 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -32,12 +32,13 @@ use std::sync::Arc; -use jsonrpc_pubsub::manager::SubscriptionManager; use node_primitives::{Block, BlockNumber, AccountId, Index, Balance, Hash}; use sc_consensus_babe::{Config, Epoch}; use sc_consensus_babe_rpc::BabeRpcHandler; use sc_consensus_epochs::SharedEpochChanges; -use sc_finality_grandpa::{SharedVoterState, SharedAuthoritySet, GrandpaJustificationStream}; +use sc_finality_grandpa::{ + SharedVoterState, SharedAuthoritySet, FinalityProofProvider, GrandpaJustificationStream +}; use sc_finality_grandpa_rpc::GrandpaRpcHandler; use sc_keystore::KeyStorePtr; pub use sc_rpc_api::DenyUnsafe; @@ -46,6 +47,7 @@ use sp_block_builder::BlockBuilder; use sp_blockchain::{Error as BlockChainError, HeaderMetadata, HeaderBackend}; use sp_consensus::SelectChain; use sp_consensus_babe::BabeApi; +use sc_rpc::SubscriptionTaskExecutor; use sp_transaction_pool::TransactionPool; /// Light client extra dependencies. @@ -71,19 +73,21 @@ pub struct BabeDeps { } /// Extra dependencies for GRANDPA -pub struct GrandpaDeps { +pub struct GrandpaDeps { /// Voting round info. pub shared_voter_state: SharedVoterState, /// Authority set info. pub shared_authority_set: SharedAuthoritySet, /// Receives notifications about justification events from Grandpa. pub justification_stream: GrandpaJustificationStream, - /// Subscription manager to keep track of pubsub subscribers. - pub subscriptions: SubscriptionManager, + /// Executor to drive the subscription manager in the Grandpa RPC handler. + pub subscription_executor: SubscriptionTaskExecutor, + /// Finality proof provider. + pub finality_provider: Arc>, } /// Full client dependencies. -pub struct FullDeps { +pub struct FullDeps { /// The client instance to use. pub client: Arc, /// Transaction pool instance. @@ -95,15 +99,15 @@ pub struct FullDeps { /// BABE specific dependencies. pub babe: BabeDeps, /// GRANDPA specific dependencies. 
- pub grandpa: GrandpaDeps, + pub grandpa: GrandpaDeps, } /// A IO handler that uses all Full RPC extensions. pub type IoHandler = jsonrpc_core::IoHandler; /// Instantiate all Full RPC extensions. -pub fn create_full( - deps: FullDeps, +pub fn create_full( + deps: FullDeps, ) -> jsonrpc_core::IoHandler where C: ProvideRuntimeApi, C: HeaderBackend + HeaderMetadata + 'static, @@ -115,6 +119,8 @@ pub fn create_full( C::Api: BlockBuilder, P: TransactionPool + 'static, SC: SelectChain +'static, + B: sc_client_api::Backend + Send + Sync + 'static, + B::State: sc_client_api::backend::StateBackend>, { use substrate_frame_rpc_system::{FullSystem, SystemApi}; use pallet_contracts_rpc::{Contracts, ContractsApi}; @@ -139,7 +145,8 @@ pub fn create_full( shared_voter_state, shared_authority_set, justification_stream, - subscriptions, + subscription_executor, + finality_provider, } = grandpa; io.extend_with( @@ -172,7 +179,8 @@ pub fn create_full( shared_authority_set, shared_voter_state, justification_stream, - subscriptions, + subscription_executor, + finality_provider, ) ) ); diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 35ed7400459f297f2b64562bb654cffe56c24b93..47a26c92493d1b03cf6adedd5f06a6076b76eb1f 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-runtime" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -18,76 +18,77 @@ codec = { package = "parity-scale-codec", version = "1.3.4", default-features = integer-sqrt = { version = "0.1.2" } serde = { version = "1.0.102", optional = true } static_assertions = "1.1.0" -hex-literal = { version = "0.2.1", optional = true } +hex-literal = { version = "0.3.1", optional = true } # primitives -sp-authority-discovery = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/authority-discovery" } -sp-consensus-babe = { version = "0.8.0-rc5", default-features = false, path = "../../../primitives/consensus/babe" } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0-rc5"} -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/inherents" } -node-primitives = { version = "2.0.0-rc5", default-features = false, path = "../primitives" } -sp-offchain = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/offchain" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/staking" } -sp-keyring = { version = "2.0.0-rc5", optional = true, path = "../../../primitives/keyring" } -sp-session = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/session" } -sp-transaction-pool = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/version" } +sp-authority-discovery = { version = "2.0.0", default-features = false, path = "../../../primitives/authority-discovery" } +sp-consensus-babe = { 
version = "0.8.0", default-features = false, path = "../../../primitives/consensus/babe" } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0"} +sp-inherents = { version = "2.0.0", default-features = false, path = "../../../primitives/inherents" } +node-primitives = { version = "2.0.0", default-features = false, path = "../primitives" } +sp-offchain = { version = "2.0.0", default-features = false, path = "../../../primitives/offchain" } +sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } +sp-api = { version = "2.0.0", default-features = false, path = "../../../primitives/api" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-staking = { version = "2.0.0", default-features = false, path = "../../../primitives/staking" } +sp-keyring = { version = "2.0.0", optional = true, path = "../../../primitives/keyring" } +sp-session = { version = "2.0.0", default-features = false, path = "../../../primitives/session" } +sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "2.0.0", default-features = false, path = "../../../primitives/version" } # frame dependencies -frame-executive = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/executive" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system" } -frame-system-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system/benchmarking", optional = true } -frame-system-rpc-runtime-api = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } -pallet-authority-discovery = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/authority-discovery" } -pallet-authorship = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/authorship" } -pallet-babe = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/babe" } -pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/balances" } -pallet-collective = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/collective" } -pallet-contracts = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/contracts" } -pallet-contracts-primitives = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/contracts/common/" } -pallet-contracts-rpc-runtime-api = { version = "0.8.0-rc5", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } -pallet-democracy = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/democracy" } -pallet-elections-phragmen = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/elections-phragmen" } -pallet-finality-tracker = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/finality-tracker" } -pallet-grandpa = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/grandpa" } -pallet-im-online = { version = "2.0.0-rc5", 
default-features = false, path = "../../../frame/im-online" } -pallet-indices = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/indices" } -pallet-identity = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/identity" } -pallet-membership = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/membership" } -pallet-multisig = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/multisig" } -pallet-offences = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/offences" } -pallet-offences-benchmarking = { version = "2.0.0-rc5", path = "../../../frame/offences/benchmarking", default-features = false, optional = true } -pallet-proxy = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/proxy" } -pallet-randomness-collective-flip = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/randomness-collective-flip" } -pallet-recovery = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/recovery" } -pallet-session = { version = "2.0.0-rc5", features = ["historical"], path = "../../../frame/session", default-features = false } -pallet-session-benchmarking = { version = "2.0.0-rc5", path = "../../../frame/session/benchmarking", default-features = false, optional = true } -pallet-staking = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/staking" } -pallet-staking-reward-curve = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/staking/reward-curve" } -pallet-scheduler = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/scheduler" } -pallet-society = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/society" } -pallet-sudo = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/sudo" } -pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/timestamp" } -pallet-treasury = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/treasury" } -pallet-utility = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/utility" } -pallet-transaction-payment = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/transaction-payment" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } -pallet-vesting = { version = "2.0.0-rc5", default-features = false, path = "../../../frame/vesting" } +frame-executive = { version = "2.0.0", default-features = false, path = "../../../frame/executive" } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../../frame/benchmarking", optional = true } +frame-support = { version = "2.0.0", default-features = false, path = "../../../frame/support" } +frame-system = { version = "2.0.0", default-features = false, path = "../../../frame/system" } +frame-system-benchmarking = { version = "2.0.0", default-features = false, path = "../../../frame/system/benchmarking", optional = true } +frame-system-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } +pallet-authority-discovery = { version = "2.0.0", default-features = false, path = "../../../frame/authority-discovery" } +pallet-authorship = { version = "2.0.0", default-features = false, path = "../../../frame/authorship" } +pallet-babe = { version = "2.0.0", 
default-features = false, path = "../../../frame/babe" } +pallet-balances = { version = "2.0.0", default-features = false, path = "../../../frame/balances" } +pallet-collective = { version = "2.0.0", default-features = false, path = "../../../frame/collective" } +pallet-contracts = { version = "2.0.0", default-features = false, path = "../../../frame/contracts" } +pallet-contracts-primitives = { version = "2.0.0", default-features = false, path = "../../../frame/contracts/common/" } +pallet-contracts-rpc-runtime-api = { version = "0.8.0", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } +pallet-democracy = { version = "2.0.0", default-features = false, path = "../../../frame/democracy" } +pallet-elections-phragmen = { version = "2.0.0", default-features = false, path = "../../../frame/elections-phragmen" } +pallet-finality-tracker = { version = "2.0.0", default-features = false, path = "../../../frame/finality-tracker" } +pallet-grandpa = { version = "2.0.0", default-features = false, path = "../../../frame/grandpa" } +pallet-im-online = { version = "2.0.0", default-features = false, path = "../../../frame/im-online" } +pallet-indices = { version = "2.0.0", default-features = false, path = "../../../frame/indices" } +pallet-identity = { version = "2.0.0", default-features = false, path = "../../../frame/identity" } +pallet-membership = { version = "2.0.0", default-features = false, path = "../../../frame/membership" } +pallet-multisig = { version = "2.0.0", default-features = false, path = "../../../frame/multisig" } +pallet-offences = { version = "2.0.0", default-features = false, path = "../../../frame/offences" } +pallet-offences-benchmarking = { version = "2.0.0", path = "../../../frame/offences/benchmarking", default-features = false, optional = true } +pallet-proxy = { version = "2.0.0", default-features = false, path = "../../../frame/proxy" } +pallet-randomness-collective-flip = { version = "2.0.0", default-features = false, path = "../../../frame/randomness-collective-flip" } +pallet-recovery = { version = "2.0.0", default-features = false, path = "../../../frame/recovery" } +pallet-session = { version = "2.0.0", features = ["historical"], path = "../../../frame/session", default-features = false } +pallet-session-benchmarking = { version = "2.0.0", path = "../../../frame/session/benchmarking", default-features = false, optional = true } +pallet-staking = { version = "2.0.0", default-features = false, path = "../../../frame/staking" } +pallet-staking-reward-curve = { version = "2.0.0", default-features = false, path = "../../../frame/staking/reward-curve" } +pallet-scheduler = { version = "2.0.0", default-features = false, path = "../../../frame/scheduler" } +pallet-society = { version = "2.0.0", default-features = false, path = "../../../frame/society" } +pallet-sudo = { version = "2.0.0", default-features = false, path = "../../../frame/sudo" } +pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../../frame/timestamp" } +pallet-treasury = { version = "2.0.0", default-features = false, path = "../../../frame/treasury" } +pallet-utility = { version = "2.0.0", default-features = false, path = "../../../frame/utility" } +pallet-transaction-payment = { version = "2.0.0", default-features = false, path = "../../../frame/transaction-payment" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } +pallet-vesting = { version = 
"2.0.0", default-features = false, path = "../../../frame/vesting" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } [dev-dependencies] -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io" } +sp-io = { version = "2.0.0", path = "../../../primitives/io" } [features] default = ["std"] +with-tracing = [ "frame-executive/with-tracing" ] std = [ "sp-authority-discovery/std", "pallet-authority-discovery/std", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index acc1b072818345d0c90d17ff963ed0c63c3312ff..4d2f11d2f9206a1b6e8f5fe05f8f96ab6459958a 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -83,6 +83,7 @@ use impls::{CurrencyToVoteHandler, Author}; /// Constant values used within the runtime. pub mod constants; use constants::{time::*, currency::*}; +use sp_runtime::generic::Era; /// Weights for pallets used in the runtime. mod weights; @@ -108,7 +109,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 257, + spec_version: 259, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -178,17 +179,17 @@ impl frame_system::Trait for Runtime { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = Version; - type ModuleToIndex = ModuleToIndex; + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); - type SystemWeightInfo = (); + type SystemWeightInfo = weights::frame_system::WeightInfo; } impl pallet_utility::Trait for Runtime { type Event = Event; type Call = Call; - type WeightInfo = (); + type WeightInfo = weights::pallet_utility::WeightInfo; } parameter_types! { @@ -206,7 +207,7 @@ impl pallet_multisig::Trait for Runtime { type DepositBase = DepositBase; type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; - type WeightInfo = (); + type WeightInfo = weights::pallet_multisig::WeightInfo; } parameter_types! { @@ -215,6 +216,9 @@ parameter_types! { // Additional storage item size of 33 bytes. pub const ProxyDepositFactor: Balance = deposit(0, 33); pub const MaxProxies: u16 = 32; + pub const AnnouncementDepositBase: Balance = deposit(1, 8); + pub const AnnouncementDepositFactor: Balance = deposit(0, 66); + pub const MaxPending: u16 = 32; } /// The type used to represent the kinds of proxying allowed. @@ -230,13 +234,20 @@ impl InstanceFilter for ProxyType { fn filter(&self, c: &Call) -> bool { match self { ProxyType::Any => true, - ProxyType::NonTransfer => !matches!(c, - Call::Balances(..) | Call::Vesting(pallet_vesting::Call::vested_transfer(..)) - | Call::Indices(pallet_indices::Call::transfer(..)) + ProxyType::NonTransfer => !matches!( + c, + Call::Balances(..) | + Call::Vesting(pallet_vesting::Call::vested_transfer(..)) | + Call::Indices(pallet_indices::Call::transfer(..)) ), - ProxyType::Governance => matches!(c, - Call::Democracy(..) | Call::Council(..) | Call::Society(..) - | Call::TechnicalCommittee(..) | Call::Elections(..) | Call::Treasury(..) + ProxyType::Governance => matches!( + c, + Call::Democracy(..) | + Call::Council(..) | + Call::Society(..) | + Call::TechnicalCommittee(..) | + Call::Elections(..) | + Call::Treasury(..) 
), ProxyType::Staking => matches!(c, Call::Staking(..)), } @@ -260,11 +271,16 @@ impl pallet_proxy::Trait for Runtime { type ProxyDepositBase = ProxyDepositBase; type ProxyDepositFactor = ProxyDepositFactor; type MaxProxies = MaxProxies; - type WeightInfo = (); + type WeightInfo = weights::pallet_proxy::WeightInfo; + type MaxPending = MaxPending; + type CallHasher = BlakeTwo256; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; } parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * MaximumBlockWeight::get(); + pub const MaxScheduledPerBlock: u32 = 50; } impl pallet_scheduler::Trait for Runtime { @@ -274,7 +290,8 @@ impl pallet_scheduler::Trait for Runtime { type Call = Call; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; - type WeightInfo = (); + type MaxScheduledPerBlock = MaxScheduledPerBlock; + type WeightInfo = weights::pallet_scheduler::WeightInfo; } parameter_types! { @@ -301,6 +318,8 @@ impl pallet_babe::Trait for Runtime { type HandleEquivocation = pallet_babe::EquivocationHandler; + + type WeightInfo = (); } parameter_types! { @@ -312,14 +331,18 @@ impl pallet_indices::Trait for Runtime { type Currency = Balances; type Deposit = IndexDeposit; type Event = Event; - type WeightInfo = (); + type WeightInfo = weights::pallet_indices::WeightInfo; } parameter_types! { pub const ExistentialDeposit: Balance = 1 * DOLLARS; + // For weight estimation, we assume that the most locks on an individual account will be 50. + // This number may need to be adjusted in the future if this assumption no longer holds true. + pub const MaxLocks: u32 = 50; } impl pallet_balances::Trait for Runtime { + type MaxLocks = MaxLocks; type Balance = Balance; type DustRemoval = (); type Event = Event; @@ -352,7 +375,7 @@ impl pallet_timestamp::Trait for Runtime { type Moment = Moment; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; - type WeightInfo = (); + type WeightInfo = weights::pallet_timestamp::WeightInfo; } parameter_types! { @@ -389,7 +412,7 @@ impl pallet_session::Trait for Runtime { type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; - type WeightInfo = (); + type WeightInfo = weights::pallet_session::WeightInfo; } impl pallet_session::historical::Trait for Runtime { @@ -446,7 +469,7 @@ impl pallet_staking::Trait for Runtime { type MinSolutionScoreBump = MinSolutionScoreBump; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type UnsignedPriority = StakingUnsignedPriority; - type WeightInfo = (); + type WeightInfo = weights::pallet_staking::WeightInfo; } parameter_types! { @@ -501,6 +524,7 @@ impl pallet_democracy::Trait for Runtime { parameter_types! { pub const CouncilMotionDuration: BlockNumber = 5 * DAYS; pub const CouncilMaxProposals: u32 = 100; + pub const CouncilMaxMembers: u32 = 100; } type CouncilCollective = pallet_collective::Instance1; @@ -510,7 +534,9 @@ impl pallet_collective::Trait for Runtime { type Event = Event; type MotionDuration = CouncilMotionDuration; type MaxProposals = CouncilMaxProposals; - type WeightInfo = (); + type MaxMembers = CouncilMaxMembers; + type DefaultVote = pallet_collective::PrimeDefaultVote; + type WeightInfo = weights::pallet_collective::WeightInfo; } parameter_types! { @@ -522,8 +548,8 @@ parameter_types! 
{ pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect"; } -// Make sure that there are no more than `MAX_MEMBERS` members elected via elections-phragmen. -const_assert!(DesiredMembers::get() <= pallet_collective::MAX_MEMBERS); +// Make sure that there are no more than `MaxMembers` members elected via elections-phragmen. +const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get()); impl pallet_elections_phragmen::Trait for Runtime { type Event = Event; @@ -542,12 +568,13 @@ impl pallet_elections_phragmen::Trait for Runtime { type DesiredMembers = DesiredMembers; type DesiredRunnersUp = DesiredRunnersUp; type TermDuration = TermDuration; - type WeightInfo = (); + type WeightInfo = weights::pallet_elections_phragmen::WeightInfo; } parameter_types! { pub const TechnicalMotionDuration: BlockNumber = 5 * DAYS; pub const TechnicalMaxProposals: u32 = 100; + pub const TechnicalMaxMembers: u32 = 100; } type TechnicalCollective = pallet_collective::Instance2; @@ -557,7 +584,9 @@ impl pallet_collective::Trait for Runtime { type Event = Event; type MotionDuration = TechnicalMotionDuration; type MaxProposals = TechnicalMaxProposals; - type WeightInfo = (); + type MaxMembers = TechnicalMaxMembers; + type DefaultVote = pallet_collective::PrimeDefaultVote; + type WeightInfo = weights::pallet_collective::WeightInfo; } type EnsureRootOrHalfCouncil = EnsureOneOf< @@ -584,8 +613,14 @@ parameter_types! { pub const TipCountdown: BlockNumber = 1 * DAYS; pub const TipFindersFee: Percent = Percent::from_percent(20); pub const TipReportDepositBase: Balance = 1 * DOLLARS; - pub const TipReportDepositPerByte: Balance = 1 * CENTS; + pub const DataDepositPerByte: Balance = 1 * CENTS; + pub const BountyDepositBase: Balance = 1 * DOLLARS; + pub const BountyDepositPayoutDelay: BlockNumber = 1 * DAYS; pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); + pub const BountyUpdatePeriod: BlockNumber = 14 * DAYS; + pub const MaximumReasonLength: u32 = 16384; + pub const BountyCuratorDeposit: Permill = Permill::from_percent(50); + pub const BountyValueMinimum: Balance = 5 * DOLLARS; } impl pallet_treasury::Trait for Runtime { @@ -605,15 +640,21 @@ impl pallet_treasury::Trait for Runtime { type TipCountdown = TipCountdown; type TipFindersFee = TipFindersFee; type TipReportDepositBase = TipReportDepositBase; - type TipReportDepositPerByte = TipReportDepositPerByte; + type DataDepositPerByte = DataDepositPerByte; type Event = Event; - type ProposalRejection = (); + type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; + type BountyDepositBase = BountyDepositBase; + type BountyDepositPayoutDelay = BountyDepositPayoutDelay; + type BountyUpdatePeriod = BountyUpdatePeriod; + type BountyCuratorDeposit = BountyCuratorDeposit; + type BountyValueMinimum = BountyValueMinimum; + type MaximumReasonLength = MaximumReasonLength; type BurnDestination = (); - type WeightInfo = (); + type WeightInfo = weights::pallet_treasury::WeightInfo; } parameter_types! { @@ -654,9 +695,9 @@ parameter_types! 
{ pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2; } - -impl frame_system::offchain::CreateSignedTransaction for Runtime where - Call: From, +impl frame_system::offchain::CreateSignedTransaction for Runtime + where + Call: From, { fn create_transaction>( call: Call, @@ -664,6 +705,7 @@ impl frame_system::offchain::CreateSignedTransaction for R account: AccountId, nonce: Index, ) -> Option<(Call, ::SignaturePayload)> { + let tip = 0; // take the biggest period possible. let period = BlockHashCount::get() .checked_next_power_of_two() @@ -674,22 +716,25 @@ impl frame_system::offchain::CreateSignedTransaction for R // The `System::block_number` is initialized with `n+1`, // so the actual block number is `n`. .saturating_sub(1); - let tip = 0; - let extra: SignedExtra = ( + let era = Era::mortal(period, current_block); + let extra = ( frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(generic::Era::mortal(period, current_block)), + frame_system::CheckEra::::from(era), frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), ); - let raw_payload = SignedPayload::new(call, extra).map_err(|e| { - debug::warn!("Unable to create signed payload: {:?}", e); - }).ok()?; - let signature = raw_payload.using_encoded(|payload| { - C::sign(payload, public) - })?; + let raw_payload = SignedPayload::new(call, extra) + .map_err(|e| { + debug::warn!("Unable to create signed payload: {:?}", e); + }) + .ok()?; + let signature = raw_payload + .using_encoded(|payload| { + C::sign(payload, public) + })?; let address = Indices::unlookup(account); let (call, extra, _) = raw_payload.deconstruct(); Some((call, (address, signature.into(), extra))) @@ -714,7 +759,7 @@ impl pallet_im_online::Trait for Runtime { type SessionDuration = SessionDuration; type ReportUnresponsiveness = Offences; type UnsignedPriority = ImOnlineUnsignedPriority; - type WeightInfo = (); + type WeightInfo = weights::pallet_im_online::WeightInfo; } parameter_types! { @@ -726,7 +771,6 @@ impl pallet_offences::Trait for Runtime { type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; - type WeightInfo = (); } impl pallet_authority_discovery::Trait for Runtime {} @@ -747,6 +791,8 @@ impl pallet_grandpa::Trait for Runtime { type HandleEquivocation = pallet_grandpa::EquivocationHandler; + + type WeightInfo = (); } parameter_types! { @@ -781,7 +827,7 @@ impl pallet_identity::Trait for Runtime { type Slashed = Treasury; type ForceOrigin = EnsureRootOrHalfCouncil; type RegistrarOrigin = EnsureRootOrHalfCouncil; - type WeightInfo = (); + type WeightInfo = weights::pallet_identity::WeightInfo; } parameter_types! { @@ -838,7 +884,7 @@ impl pallet_vesting::Trait for Runtime { type Currency = Balances; type BlockNumberToBalance = ConvertInto; type MinVestedTransfer = MinVestedTransfer; - type WeightInfo = (); + type WeightInfo = weights::pallet_vesting::WeightInfo; } construct_runtime!( @@ -1122,15 +1168,9 @@ impl_runtime_apis! 
{ #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { fn dispatch_benchmark( - pallet: Vec, - benchmark: Vec, - lowest_range_values: Vec, - highest_range_values: Vec, - steps: Vec, - repeat: u32, - extra: bool, + config: frame_benchmarking::BenchmarkConfig ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark}; + use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency issues. // To get around that, we separated the Session benchmarks into its own crate, which is why // we need these two lines below. @@ -1142,25 +1182,23 @@ impl_runtime_apis! { impl pallet_offences_benchmarking::Trait for Runtime {} impl frame_system_benchmarking::Trait for Runtime {} - let whitelist: Vec> = vec![ + let whitelist: Vec = vec![ // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec(), + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec(), + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec(), + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec(), + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec(), - // Caller 0 Account - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da946c154ffd9992e395af90b5b13cc6f295c77033fce8a9045824a6690bbf99c6db269502f0a8d1d2a008542d5690a0749").to_vec(), + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), // Treasury Account - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95ecffd7b6c0f78751baa9d281e0bfa3a6d6f646c70792f74727372790000000000000000000000000000000000000000").to_vec(), + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95ecffd7b6c0f78751baa9d281e0bfa3a6d6f646c70792f74727372790000000000000000000000000000000000000000").to_vec().into(), ]; let mut batches = Vec::::new(); - let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat, &whitelist, extra); + let params = (&config, &whitelist); add_benchmark!(params, batches, pallet_babe, Babe); add_benchmark!(params, batches, pallet_balances, Balances); diff --git a/bin/node/runtime/src/weights/frame_system.rs b/bin/node/runtime/src/weights/frame_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..9522fa75203906ab3c7264154a4b33835375843c --- /dev/null +++ b/bin/node/runtime/src/weights/frame_system.rs @@ -0,0 +1,58 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5
+
+#![allow(unused_parens)]
+
+use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight};
+
+pub struct WeightInfo;
+impl frame_system::WeightInfo for WeightInfo {
+ // WARNING! Some components were not used: ["b"]
+ fn remark() -> Weight {
+ (1305000 as Weight)
+ }
+ fn set_heap_pages() -> Weight {
+ (2023000 as Weight)
+ .saturating_add(DbWeight::get().writes(1 as Weight))
+ }
+ // WARNING! Some components were not used: ["d"]
+ fn set_changes_trie_config() -> Weight {
+ (10026000 as Weight)
+ .saturating_add(DbWeight::get().reads(1 as Weight))
+ .saturating_add(DbWeight::get().writes(2 as Weight))
+ }
+ fn set_storage(i: u32, ) -> Weight {
+ (0 as Weight)
+ .saturating_add((656000 as Weight).saturating_mul(i as Weight))
+ .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
+ }
+ fn kill_storage(i: u32, ) -> Weight {
+ (4327000 as Weight)
+ .saturating_add((478000 as Weight).saturating_mul(i as Weight))
+ .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
+ }
+ fn kill_prefix(p: u32, ) -> Weight {
+ (8349000 as Weight)
+ .saturating_add((838000 as Weight).saturating_mul(p as Weight))
+ .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight)))
+ }
+ fn suicide() -> Weight {
+ (29247000 as Weight)
+ }
+}
diff --git a/bin/node/runtime/src/weights/mod.rs b/bin/node/runtime/src/weights/mod.rs
index 70bae879ce05cfe002af6114cf2e375a59e594aa..2058227ab2e69e5763414907813b459442a85a6f 100644
--- a/bin/node/runtime/src/weights/mod.rs
+++ b/bin/node/runtime/src/weights/mod.rs
@@ -15,5 +15,20 @@
 //! A list of the different weight modules for our runtime.
+pub mod frame_system;
 pub mod pallet_balances;
+pub mod pallet_treasury;
+pub mod pallet_collective;
 pub mod pallet_democracy;
+pub mod pallet_identity;
+pub mod pallet_indices;
+pub mod pallet_im_online;
+pub mod pallet_multisig;
+pub mod pallet_proxy;
+pub mod pallet_scheduler;
+pub mod pallet_session;
+pub mod pallet_staking;
+pub mod pallet_timestamp;
+pub mod pallet_utility;
+pub mod pallet_vesting;
+pub mod pallet_elections_phragmen;
diff --git a/bin/node/runtime/src/weights/pallet_balances.rs b/bin/node/runtime/src/weights/pallet_balances.rs
index 21a90a97e63a122b62d30bb90c06dedeebf7d98d..bcbc4ced6ef56895d4c62a31a7b0580ad71b9ecf 100644
--- a/bin/node/runtime/src/weights/pallet_balances.rs
+++ b/bin/node/runtime/src/weights/pallet_balances.rs
@@ -13,7 +13,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//! Weights for the Balances Pallet
+//!
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; diff --git a/bin/node/runtime/src/weights/pallet_collective.rs b/bin/node/runtime/src/weights/pallet_collective.rs new file mode 100644 index 0000000000000000000000000000000000000000..32b4ad02d7aa94c8007429b24bef2f062b310036 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_collective.rs @@ -0,0 +1,97 @@ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_collective::WeightInfo for WeightInfo { + fn set_members(m: u32, n: u32, p: u32, ) -> Weight { + (0 as Weight) + .saturating_add((21040000 as Weight).saturating_mul(m as Weight)) + .saturating_add((173000 as Weight).saturating_mul(n as Weight)) + .saturating_add((31595000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(DbWeight::get().writes(2 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + } + fn execute(b: u32, m: u32, ) -> Weight { + (43359000 as Weight) + .saturating_add((4000 as Weight).saturating_mul(b as Weight)) + .saturating_add((123000 as Weight).saturating_mul(m as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + } + fn propose_execute(b: u32, m: u32, ) -> Weight { + (54134000 as Weight) + .saturating_add((4000 as Weight).saturating_mul(b as Weight)) + .saturating_add((239000 as Weight).saturating_mul(m as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + } + fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { + (90650000 as Weight) + .saturating_add((5000 as Weight).saturating_mul(b as Weight)) + .saturating_add((152000 as Weight).saturating_mul(m as Weight)) + .saturating_add((970000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(4 as Weight)) + } + fn vote(m: u32, ) -> Weight { + (74460000 as Weight) + .saturating_add((290000 as Weight).saturating_mul(m as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn close_early_disapproved(m: u32, p: u32, ) -> Weight { + (86360000 as Weight) + .saturating_add((232000 as Weight).saturating_mul(m as Weight)) + .saturating_add((954000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { + (123653000 as Weight) + 
.saturating_add((1000 as Weight).saturating_mul(b as Weight)) + .saturating_add((287000 as Weight).saturating_mul(m as Weight)) + .saturating_add((920000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn close_disapproved(m: u32, p: u32, ) -> Weight { + (95395000 as Weight) + .saturating_add((236000 as Weight).saturating_mul(m as Weight)) + .saturating_add((965000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { + (135284000 as Weight) + .saturating_add((4000 as Weight).saturating_mul(b as Weight)) + .saturating_add((218000 as Weight).saturating_mul(m as Weight)) + .saturating_add((951000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(5 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn disapprove_proposal(p: u32, ) -> Weight { + (50500000 as Weight) + .saturating_add((966000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } +} diff --git a/bin/node/runtime/src/weights/pallet_elections_phragmen.rs b/bin/node/runtime/src/weights/pallet_elections_phragmen.rs new file mode 100644 index 0000000000000000000000000000000000000000..f7ce1620a1f56e6dad23beb7ef7649e8b24eba49 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_elections_phragmen.rs @@ -0,0 +1,89 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_elections_phragmen::WeightInfo for WeightInfo { + fn vote(v: u32, ) -> Weight { + (91_489_000 as Weight) + .saturating_add((199_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().reads(5 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn vote_update(v: u32, ) -> Weight { + (56_511_000 as Weight) + .saturating_add((245_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().reads(5 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn remove_voter() -> Weight { + (76_714_000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn report_defunct_voter_correct(c: u32, v: u32, ) -> Weight { + (0 as Weight) + .saturating_add((1_743_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((31_750_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().reads(7 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn report_defunct_voter_incorrect(c: u32, v: u32, ) -> Weight { + (0 as Weight) + .saturating_add((1_733_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((31_861_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().reads(6 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn submit_candidacy(c: u32, ) -> Weight { + (74_714_000 as Weight) + .saturating_add((315_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn renounce_candidacy_candidate(c: u32, ) -> Weight { + (50_408_000 as Weight) + .saturating_add((159_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn renounce_candidacy_members() -> Weight { + (79_626_000 as Weight) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(4 as Weight)) + } + fn renounce_candidacy_runners_up() -> Weight { + (49_715_000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_member_with_replacement() -> Weight { + (76_572_000 as Weight) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(5 as Weight)) + } + fn remove_member_wrong_refund() -> Weight { + (8_777_000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + } +} diff --git a/bin/node/runtime/src/weights/pallet_identity.rs b/bin/node/runtime/src/weights/pallet_identity.rs new file mode 100644 index 0000000000000000000000000000000000000000..2995a7674f8aea92511602ccc20d4f10db631118 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_identity.rs @@ -0,0 +1,136 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_identity::WeightInfo for WeightInfo { + fn add_registrar(r: u32, ) -> Weight { + (39_603_000 as Weight) + .saturating_add((418_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_identity(r: u32, x: u32, ) -> Weight { + (110_679_000 as Weight) + .saturating_add((389_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_985_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_subs_new(s: u32, ) -> Weight { + (78_697_000 as Weight) + .saturating_add((15_225_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(DbWeight::get().writes(1 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + fn set_subs_old(p: u32, ) -> Weight { + (71_308_000 as Weight) + .saturating_add((5_772_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + } + fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { + (91_553_000 as Weight) + .saturating_add((284_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((5_749_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_621_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + fn request_judgement(r: u32, x: u32, ) -> Weight { + (110_856_000 as Weight) + .saturating_add((496_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((3_221_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn cancel_request(r: u32, x: u32, ) -> Weight { + (96_857_000 as Weight) + .saturating_add((311_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((3_204_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_fee(r: u32, ) -> Weight { + (16_276_000 as Weight) + .saturating_add((381_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_account_id(r: u32, ) -> Weight { + (18_530_000 as Weight) + .saturating_add((391_000 as Weight).saturating_mul(r as Weight)) 
+ .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_fields(r: u32, ) -> Weight { + (16_359_000 as Weight) + .saturating_add((379_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn provide_judgement(r: u32, x: u32, ) -> Weight { + (72_869_000 as Weight) + .saturating_add((423_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((3_187_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { + (123_199_000 as Weight) + .saturating_add((71_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((5_730_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + fn add_sub(s: u32, ) -> Weight { + (110_070_000 as Weight) + .saturating_add((262_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn rename_sub(s: u32, ) -> Weight { + (37_130_000 as Weight) + .saturating_add((79_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_sub(s: u32, ) -> Weight { + (103_295_000 as Weight) + .saturating_add((235_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn quit_sub(s: u32, ) -> Weight { + (65_716_000 as Weight) + .saturating_add((227_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } +} diff --git a/bin/node/runtime/src/weights/pallet_im_online.rs b/bin/node/runtime/src/weights/pallet_im_online.rs new file mode 100644 index 0000000000000000000000000000000000000000..25daff6a6e18f0bd202eb35f07654e706fd1791e --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_im_online.rs @@ -0,0 +1,34 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_im_online::WeightInfo for WeightInfo { + fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { + (139830000 as Weight) + .saturating_add((211000 as Weight).saturating_mul(k as Weight)) + .saturating_add((654000 as Weight).saturating_mul(e as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } +} diff --git a/bin/node/runtime/src/weights/pallet_indices.rs b/bin/node/runtime/src/weights/pallet_indices.rs new file mode 100644 index 0000000000000000000000000000000000000000..f6a708bbd46b602034fa98b078d84ff8ce8c49d4 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_indices.rs @@ -0,0 +1,52 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_indices::WeightInfo for WeightInfo { + fn claim() -> Weight { + (56_237_000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn transfer() -> Weight { + (63_665_000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn free() -> Weight { + (50_736_000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn force_transfer() -> Weight { + (52_361_000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn freeze() -> Weight { + (46_483_000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } +} diff --git a/bin/node/runtime/src/weights/pallet_multisig.rs b/bin/node/runtime/src/weights/pallet_multisig.rs new file mode 100644 index 0000000000000000000000000000000000000000..1de1d9a67f441e31d209bcef799f19d0d012d438 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_multisig.rs @@ -0,0 +1,90 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_multisig::WeightInfo for WeightInfo { + fn as_multi_threshold_1(z: u32, ) -> Weight { + (17_161_000 as Weight) + .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) + } + fn as_multi_create(s: u32, z: u32, ) -> Weight { + (79_857_000 as Weight) + .saturating_add((131_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn as_multi_create_store(s: u32, z: u32, ) -> Weight { + (90_218_000 as Weight) + .saturating_add((129_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn as_multi_approve(s: u32, z: u32, ) -> Weight { + (48_402_000 as Weight) + .saturating_add((132_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { + (88_390_000 as Weight) + .saturating_add((120_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn as_multi_complete(s: u32, z: u32, ) -> Weight { + (98_960_000 as Weight) + .saturating_add((276_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((6_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn approve_as_multi_create(s: u32, ) -> Weight { + (80_185_000 as Weight) + .saturating_add((121_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn approve_as_multi_approve(s: u32, ) -> Weight { + (48_386_000 as Weight) + .saturating_add((143_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn approve_as_multi_complete(s: u32, ) -> Weight { + (177_181_000 as Weight) + .saturating_add((273_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn cancel_as_multi(s: u32, ) -> Weight { + (126_334_000 as Weight) + .saturating_add((124_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } +} diff --git 
a/bin/node/runtime/src/weights/pallet_proxy.rs b/bin/node/runtime/src/weights/pallet_proxy.rs new file mode 100644 index 0000000000000000000000000000000000000000..92c43cd4853a215edade20d9b281e20ee8559663 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_proxy.rs @@ -0,0 +1,85 @@ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_proxy::WeightInfo for WeightInfo { + fn proxy(p: u32, ) -> Weight { + (26127000 as Weight) + .saturating_add((214000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + } + fn proxy_announced(a: u32, p: u32, ) -> Weight { + (55405000 as Weight) + .saturating_add((774000 as Weight).saturating_mul(a as Weight)) + .saturating_add((209000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn remove_announcement(a: u32, p: u32, ) -> Weight { + (35879000 as Weight) + .saturating_add((783000 as Weight).saturating_mul(a as Weight)) + .saturating_add((20000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn reject_announcement(a: u32, p: u32, ) -> Weight { + (36097000 as Weight) + .saturating_add((780000 as Weight).saturating_mul(a as Weight)) + .saturating_add((12000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn announce(a: u32, p: u32, ) -> Weight { + (53769000 as Weight) + .saturating_add((675000 as Weight).saturating_mul(a as Weight)) + .saturating_add((214000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn add_proxy(p: u32, ) -> Weight { + (36082000 as Weight) + .saturating_add((234000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_proxy(p: u32, ) -> Weight { + (32885000 as Weight) + .saturating_add((267000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_proxies(p: u32, ) -> Weight { + (31735000 as Weight) + .saturating_add((215000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn anonymous(p: u32, ) -> Weight { + (50907000 as Weight) + .saturating_add((61000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + 
.saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn kill_anonymous(p: u32, ) -> Weight { + (33926000 as Weight) + .saturating_add((208000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } +} diff --git a/bin/node/runtime/src/weights/pallet_scheduler.rs b/bin/node/runtime/src/weights/pallet_scheduler.rs new file mode 100644 index 0000000000000000000000000000000000000000..110a0545ed8396c57bcbd8568358956cb77e8ea6 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_scheduler.rs @@ -0,0 +1,51 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_scheduler::WeightInfo for WeightInfo { + fn schedule(s: u32, ) -> Weight { + (37_835_000 as Weight) + .saturating_add((81_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn cancel(s: u32, ) -> Weight { + (34_707_000 as Weight) + .saturating_add((3_125_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn schedule_named(s: u32, ) -> Weight { + (48_065_000 as Weight) + .saturating_add((110_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn cancel_named(s: u32, ) -> Weight { + (38_776_000 as Weight) + .saturating_add((3_138_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } +} diff --git a/bin/node/runtime/src/weights/pallet_session.rs b/bin/node/runtime/src/weights/pallet_session.rs new file mode 100644 index 0000000000000000000000000000000000000000..3973936242a488b529e137343b0d74ef47e76325 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_session.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_session::WeightInfo for WeightInfo { + fn set_keys() -> Weight { + (88_411_000 as Weight) + .saturating_add(DbWeight::get().reads(6 as Weight)) + .saturating_add(DbWeight::get().writes(5 as Weight)) + } + fn purge_keys() -> Weight { + (51_843_000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(5 as Weight)) + } +} diff --git a/bin/node/runtime/src/weights/pallet_staking.rs b/bin/node/runtime/src/weights/pallet_staking.rs new file mode 100644 index 0000000000000000000000000000000000000000..f5a70830b92e7f3c0e54bb46416daef94d89dd17 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_staking.rs @@ -0,0 +1,170 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Default weights of pallet-staking. +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_staking::WeightInfo for WeightInfo { + fn bond() -> Weight { + (144278000 as Weight) + .saturating_add(DbWeight::get().reads(5 as Weight)) + .saturating_add(DbWeight::get().writes(4 as Weight)) + } + fn bond_extra() -> Weight { + (110715000 as Weight) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn unbond() -> Weight { + (99840000 as Weight) + .saturating_add(DbWeight::get().reads(5 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn withdraw_unbonded_update(s: u32, ) -> Weight { + (100728000 as Weight) + .saturating_add((63000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(5 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn withdraw_unbonded_kill(s: u32, ) -> Weight { + (168879000 as Weight) + .saturating_add((6666000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(7 as Weight)) + .saturating_add(DbWeight::get().writes(8 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + fn validate() -> Weight { + (35539000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn nominate(n: u32, ) -> Weight { + (48596000 as Weight) + .saturating_add((308000 as Weight).saturating_mul(n as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn chill() -> Weight { + (35144000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn set_payee() -> Weight { + (24255000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_controller() -> Weight { + (52294000 as Weight) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn set_validator_count() -> Weight { + (5185000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn force_no_eras() -> Weight { + (5907000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn force_new_era() -> Weight { + (5917000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn force_new_era_always() -> Weight { + (5952000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_invulnerables(v: u32, ) -> Weight { + (6324000 as Weight) + .saturating_add((9000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn force_unstake(s: u32, ) -> Weight { + (119691000 as Weight) + .saturating_add((6681000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(8 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + fn cancel_deferred_slash(s: u32, ) -> Weight { + (5820201000 as Weight) + .saturating_add((34672000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn payout_stakers_dead_controller(n: 
u32, ) -> Weight { + (0 as Weight) + .saturating_add((92486000 as Weight).saturating_mul(n as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) + } + fn payout_stakers_alive_staked(n: u32, ) -> Weight { + (0 as Weight) + .saturating_add((117324000 as Weight).saturating_mul(n as Weight)) + .saturating_add(DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) + } + fn rebond(l: u32, ) -> Weight { + (71316000 as Weight) + .saturating_add((142000 as Weight).saturating_mul(l as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn set_history_depth(e: u32, ) -> Weight { + (0 as Weight) + .saturating_add((51901000 as Weight).saturating_mul(e as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(4 as Weight)) + .saturating_add(DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) + } + fn reap_stash(s: u32, ) -> Weight { + (147166000 as Weight) + .saturating_add((6661000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(8 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + fn new_era(v: u32, n: u32, ) -> Weight { + (0 as Weight) + .saturating_add((1440459000 as Weight).saturating_mul(v as Weight)) + .saturating_add((182580000 as Weight).saturating_mul(n as Weight)) + .saturating_add(DbWeight::get().reads(10 as Weight)) + .saturating_add(DbWeight::get().reads((4 as Weight).saturating_mul(v as Weight))) + .saturating_add(DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(DbWeight::get().writes(8 as Weight)) + .saturating_add(DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + } + fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight { + (0 as Weight) + .saturating_add((964000 as Weight).saturating_mul(v as Weight)) + .saturating_add((432000 as Weight).saturating_mul(n as Weight)) + .saturating_add((204294000 as Weight).saturating_mul(a as Weight)) + .saturating_add((9546000 as Weight).saturating_mul(w as Weight)) + .saturating_add(DbWeight::get().reads(6 as Weight)) + .saturating_add(DbWeight::get().reads((4 as Weight).saturating_mul(a as Weight))) + .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(w as Weight))) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } +} diff --git a/bin/node/runtime/src/weights/pallet_timestamp.rs b/bin/node/runtime/src/weights/pallet_timestamp.rs new file mode 100644 index 0000000000000000000000000000000000000000..cfd5f192d35298b512ee75e4d26acf11355ce3ba --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_timestamp.rs @@ -0,0 +1,34 @@ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_timestamp::WeightInfo for WeightInfo { + // WARNING! Some components were not used: ["t"] + fn set() -> Weight { + (9133000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + // WARNING! Some components were not used: ["t"] + fn on_finalize() -> Weight { + (5915000 as Weight) + } +} diff --git a/bin/node/runtime/src/weights/pallet_treasury.rs b/bin/node/runtime/src/weights/pallet_treasury.rs new file mode 100644 index 0000000000000000000000000000000000000000..fe1a3166919f011456bfef97c636196ae5b0ac20 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_treasury.rs @@ -0,0 +1,140 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_treasury::WeightInfo for WeightInfo { + fn propose_spend() -> Weight { + (79604000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn reject_proposal() -> Weight { + (61001000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn approve_proposal() -> Weight { + (17835000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn report_awesome(r: u32, ) -> Weight { + (101602000 as Weight) + .saturating_add((2000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + // WARNING! 
Some components were not used: ["r"] + fn retract_tip() -> Weight { + (82970000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn tip_new(r: u32, t: u32, ) -> Weight { + (63995000 as Weight) + .saturating_add((2000 as Weight).saturating_mul(r as Weight)) + .saturating_add((153000 as Weight).saturating_mul(t as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn tip(t: u32, ) -> Weight { + (46765000 as Weight) + .saturating_add((711000 as Weight).saturating_mul(t as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn close_tip(t: u32, ) -> Weight { + (160874000 as Weight) + .saturating_add((379000 as Weight).saturating_mul(t as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn propose_bounty(d: u32, ) -> Weight { + (86198000 as Weight) + .saturating_add((1000 as Weight).saturating_mul(d as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(4 as Weight)) + } + fn approve_bounty() -> Weight { + (23063000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn propose_curator() -> Weight { + (18890000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn unassign_curator() -> Weight { + (66768000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn accept_curator() -> Weight { + (69131000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn award_bounty() -> Weight { + (48184000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn claim_bounty() -> Weight { + (243104000 as Weight) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(5 as Weight)) + } + fn close_bounty_proposed() -> Weight { + (65917000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn close_bounty_active() -> Weight { + (157232000 as Weight) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(4 as Weight)) + } + fn extend_bounty_expiry() -> Weight { + (46216000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn on_initialize_proposals(p: u32, ) -> Weight { + (119765000 as Weight) + .saturating_add((108368000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) + .saturating_add(DbWeight::get().writes(2 as Weight)) + .saturating_add(DbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) + } + fn on_initialize_bounties(b: u32, ) -> Weight { + (112536000 as Weight) + .saturating_add((107132000 as Weight).saturating_mul(b as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) + .saturating_add(DbWeight::get().writes(2 as Weight)) + 
.saturating_add(DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) + } +} diff --git a/bin/node/runtime/src/weights/pallet_utility.rs b/bin/node/runtime/src/weights/pallet_utility.rs new file mode 100644 index 0000000000000000000000000000000000000000..c9ae0d7d2333b19bec65e4f5c1556df65b21e086 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_utility.rs @@ -0,0 +1,35 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_utility::WeightInfo for WeightInfo { + fn batch(c: u32, ) -> Weight { + (16461000 as Weight) + .saturating_add((1982000 as Weight).saturating_mul(c as Weight)) + } + // WARNING! Some components were not used: ["u"] + fn as_derivative() -> Weight { + (4086000 as Weight) + } +} diff --git a/bin/node/runtime/src/weights/pallet_vesting.rs b/bin/node/runtime/src/weights/pallet_vesting.rs new file mode 100644 index 0000000000000000000000000000000000000000..b2a4b57e64415f5a23d65204acdc25c40121f066 --- /dev/null +++ b/bin/node/runtime/src/weights/pallet_vesting.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_vesting::WeightInfo for WeightInfo { + fn vest_locked(l: u32, ) -> Weight { + (82109000 as Weight) + .saturating_add((332000 as Weight).saturating_mul(l as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn vest_unlocked(l: u32, ) -> Weight { + (88419000 as Weight) + .saturating_add((3000 as Weight).saturating_mul(l as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn vest_other_locked(l: u32, ) -> Weight { + (81277000 as Weight) + .saturating_add((321000 as Weight).saturating_mul(l as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn vest_other_unlocked(l: u32, ) -> Weight { + (87584000 as Weight) + .saturating_add((19000 as Weight).saturating_mul(l as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn vested_transfer(l: u32, ) -> Weight { + (185916000 as Weight) + .saturating_add((625000 as Weight).saturating_mul(l as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn force_vested_transfer(l: u32, ) -> Weight { + (185916000 as Weight) + .saturating_add((625000 as Weight).saturating_mul(l as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(4 as Weight)) + } +} diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index a61a344cceed1b3898ca68a92096b241933f5074..3b541b696815b2837755b8e6533e1f04c730c6ef 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-testing" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] description = "Test utilities for Substrate node." 
edition = "2018" @@ -13,40 +13,39 @@ publish = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-balances = { version = "2.0.0-rc5", path = "../../../frame/balances" } -sc-service = { version = "0.8.0-rc5", features = ["test-helpers", "db"], path = "../../../client/service" } -sc-client-db = { version = "0.8.0-rc5", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } -sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api/" } +pallet-balances = { version = "2.0.0", path = "../../../frame/balances" } +sc-service = { version = "0.8.0", features = ["test-helpers", "db"], path = "../../../client/service" } +sc-client-db = { version = "0.8.0", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } +sc-client-api = { version = "2.0.0", path = "../../../client/api/" } codec = { package = "parity-scale-codec", version = "1.3.4" } -pallet-contracts = { version = "2.0.0-rc5", path = "../../../frame/contracts" } -pallet-grandpa = { version = "2.0.0-rc5", path = "../../../frame/grandpa" } -pallet-indices = { version = "2.0.0-rc5", path = "../../../frame/indices" } -sp-keyring = { version = "2.0.0-rc5", path = "../../../primitives/keyring" } -node-executor = { version = "2.0.0-rc5", path = "../executor" } -node-primitives = { version = "2.0.0-rc5", path = "../primitives" } -node-runtime = { version = "2.0.0-rc5", path = "../runtime" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io" } -frame-support = { version = "2.0.0-rc5", path = "../../../frame/support" } -pallet-session = { version = "2.0.0-rc5", path = "../../../frame/session" } -pallet-society = { version = "2.0.0-rc5", path = "../../../frame/society" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -pallet-staking = { version = "2.0.0-rc5", path = "../../../frame/staking" } -sc-executor = { version = "0.8.0-rc5", path = "../../../client/executor", features = ["wasmtime"] } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -frame-system = { version = "2.0.0-rc5", path = "../../../frame/system" } -substrate-test-client = { version = "2.0.0-rc5", path = "../../../test-utils/client" } -pallet-timestamp = { version = "2.0.0-rc5", path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0-rc5", path = "../../../frame/transaction-payment" } -pallet-treasury = { version = "2.0.0-rc5", path = "../../../frame/treasury" } -wabt = "0.9.2" -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sp-finality-tracker = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/finality-tracker" } -sp-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/timestamp" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../../primitives/block-builder" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../../client/block-builder" } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } +pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } +pallet-grandpa = { version = "2.0.0", path = "../../../frame/grandpa" } +pallet-indices = { version = "2.0.0", path = "../../../frame/indices" } +sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } +node-executor = { version = "2.0.0", path = 
"../executor" } +node-primitives = { version = "2.0.0", path = "../primitives" } +node-runtime = { version = "2.0.0", path = "../runtime" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-io = { version = "2.0.0", path = "../../../primitives/io" } +frame-support = { version = "2.0.0", path = "../../../frame/support" } +pallet-session = { version = "2.0.0", path = "../../../frame/session" } +pallet-society = { version = "2.0.0", path = "../../../frame/society" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +pallet-staking = { version = "2.0.0", path = "../../../frame/staking" } +sc-executor = { version = "0.8.0", path = "../../../client/executor", features = ["wasmtime"] } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +frame-system = { version = "2.0.0", path = "../../../frame/system" } +substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" } +pallet-timestamp = { version = "2.0.0", path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "2.0.0", path = "../../../frame/transaction-payment" } +pallet-treasury = { version = "2.0.0", path = "../../../frame/treasury" } +sp-api = { version = "2.0.0", path = "../../../primitives/api" } +sp-finality-tracker = { version = "2.0.0", default-features = false, path = "../../../primitives/finality-tracker" } +sp-timestamp = { version = "2.0.0", default-features = false, path = "../../../primitives/timestamp" } +sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } +sc-block-builder = { version = "0.8.0", path = "../../../client/block-builder" } +sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } log = "0.4.8" tempfile = "3.1.0" fs_extra = "1" @@ -54,4 +53,4 @@ futures = "0.3.1" [dev-dependencies] criterion = "0.3.0" -sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli" } +sc-cli = { version = "0.8.0", path = "../../../client/cli" } diff --git a/bin/utils/chain-spec-builder/Cargo.toml b/bin/utils/chain-spec-builder/Cargo.toml index e90ef12f6815c0bc9db9cd5b6dfdaac9c08c9309..da455e3ec4a6c01a3e72ccf78d09cb99fb1ac1be 100644 --- a/bin/utils/chain-spec-builder/Cargo.toml +++ b/bin/utils/chain-spec-builder/Cargo.toml @@ -1,21 +1,22 @@ [package] name = "chain-spec-builder" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] ansi_term = "0.12.1" -sc-keystore = { version = "2.0.0-rc5", path = "../../../client/keystore" } -sc-chain-spec = { version = "2.0.0-rc5", path = "../../../client/chain-spec" } -node-cli = { version = "2.0.0-rc5", path = "../../node/cli" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } +sc-keystore = { version = "2.0.0", path = "../../../client/keystore" } +sc-chain-spec = { version = "2.0.0", path = "../../../client/chain-spec" } +node-cli = { version = "2.0.0", path = "../../node/cli" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } rand = "0.7.2" structopt = "0.3.8" diff --git a/bin/utils/chain-spec-builder/README.md b/bin/utils/chain-spec-builder/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..3e9ac0bddbdc17539d12893553789983e38ab05f --- /dev/null +++ b/bin/utils/chain-spec-builder/README.md @@ -0,0 +1 @@ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 459df884d688444cc092392d8b7d8c9c5eb10078..fa0b345bc8406f2f4ea33228276fdb712b1b4845 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -1,40 +1,28 @@ [package] name = "subkey" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" + +[[bin]] +path = "src/main.rs" +name = "subkey" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.1.29" -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -node-runtime = { version = "2.0.0-rc5", path = "../../node/runtime" } -node-primitives = { version = "2.0.0-rc5", path = "../../node/primitives" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -rand = "0.7.2" -clap = "2.33.0" -tiny-bip39 = "0.7" -substrate-bip39 = "0.4.1" -hex = "0.4.0" -hex-literal = "0.2.1" -codec = { package = "parity-scale-codec", version = "1.3.4" } -frame-system = { version = "2.0.0-rc5", path = "../../../frame/system" } -pallet-balances = { version = "2.0.0-rc5", path = "../../../frame/balances" } -pallet-transaction-payment = { version = "2.0.0-rc5", path = "../../../frame/transaction-payment" } -pallet-grandpa = { version = "2.0.0-rc5", path = "../../../frame/grandpa" } -rpassword = "4.0.1" -itertools = "0.8.2" -derive_more = { version = "0.99.2" } -sc-rpc = { version = "2.0.0-rc5", path = "../../../client/rpc" } -jsonrpc-core-client = { version = "14.2.0", features = ["http"] } -hyper = "0.12.35" -libp2p = { version = "0.23.0", default-features = false } -serde_json = "1.0" +node-runtime = { version = "2.0.0", path = "../../node/runtime" } +node-primitives = { version = "2.0.0", path = "../../node/primitives" } +sc-cli = { version = "0.8.0", path = "../../../client/cli" } +substrate-frame-cli = { version = "2.0.0", path = "../../../utils/frame/frame-utilities-cli" } +structopt = "0.3.14" +frame-system = { version = "2.0.0", path = "../../../frame/system" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } [features] bench = [] diff --git a/bin/utils/subkey/README.adoc b/bin/utils/subkey/README.adoc index 1fa0753312f0cd6dc1f9a94c42866326bdf4f59d..5ce0d2d324470b51032f5c43c7978fb246691da1 100644 --- a/bin/utils/subkey/README.adoc +++ b/bin/utils/subkey/README.adoc @@ -31,7 +31,7 @@ OUTPUT: `subkey` expects a message to come in on STDIN, one way to sign a message would look like this: ```bash -echo -n | subkey sign +echo -n | subkey sign --suri OUTPUT: a69da4a6ccbf81dbbbfad235fa12cf8528c18012b991ae89214de8d20d29c1280576ced6eb38b7406d1b7e03231df6dd4a5257546ddad13259356e1c3adfb509 @@ -72,7 +72,7 @@ Will output a signed and encoded `UncheckedMortalCompactExtrinsic` as hex. 
=== Inspecting a module ID ```bash -subkey --network kusama moduleid "py/trsry" +subkey module-id "py/trsry" --network kusama OUTPUT: Public Key URI `F3opxRbN5ZbjJNU511Kj2TLuzFcDq9BGduA9TgiECafpg29` is account: diff --git a/bin/utils/subkey/README.md b/bin/utils/subkey/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3e9ac0bddbdc17539d12893553789983e38ab05f --- /dev/null +++ b/bin/utils/subkey/README.md @@ -0,0 +1 @@ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/bin/utils/subkey/src/lib.rs b/bin/utils/subkey/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..051628e84a193419221d0eccd04f797dc2b56a23 --- /dev/null +++ b/bin/utils/subkey/src/lib.rs @@ -0,0 +1,82 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +use structopt::StructOpt; +use sc_cli::{ + Error, VanityCmd, SignCmd, VerifyCmd, InsertCmd, + GenerateNodeKeyCmd, GenerateCmd, InspectKeyCmd, InspectNodeKeyCmd +}; +use substrate_frame_cli::ModuleIdCmd; +use sp_core::crypto::Ss58Codec; + +#[derive(Debug, StructOpt)] +#[structopt( + name = "subkey", + author = "Parity Team ", + about = "Utility for generating and restoring with Substrate keys", +)] +pub enum Subkey { + /// Generate a random node libp2p key, save it to file or print it to stdout + /// and print its peer ID to stderr. + GenerateNodeKey(GenerateNodeKeyCmd), + + /// Generate a random account + Generate(GenerateCmd), + + /// Gets a public key and a SS58 address from the provided Secret URI + Inspect(InspectKeyCmd), + + /// Print the peer ID corresponding to the node key in the given file + InspectNodeKey(InspectNodeKeyCmd), + + /// Insert a key to the keystore of a node. + Insert(InsertCmd), + + /// Inspect a module ID address + ModuleId(ModuleIdCmd), + + /// Sign a message, with a given (secret) key. + Sign(SignCmd), + + /// Generate a seed that provides a vanity address. + Vanity(VanityCmd), + + /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. + Verify(VerifyCmd), +} + +/// Run the subkey command, given the appropriate runtime.
+pub fn run<R>() -> Result<(), Error> + where + R: frame_system::Trait, + R::AccountId: Ss58Codec +{ + match Subkey::from_args() { + Subkey::GenerateNodeKey(cmd) => cmd.run()?, + Subkey::Generate(cmd) => cmd.run()?, + Subkey::Inspect(cmd) => cmd.run()?, + Subkey::InspectNodeKey(cmd) => cmd.run()?, + Subkey::Insert(cmd) => cmd.run()?, + Subkey::ModuleId(cmd) => cmd.run::<R>()?, + Subkey::Vanity(cmd) => cmd.run()?, + Subkey::Verify(cmd) => cmd.run()?, + Subkey::Sign(cmd) => cmd.run()?, + }; + + Ok(()) +} diff --git a/bin/utils/subkey/src/main.rs b/bin/utils/subkey/src/main.rs index 9455e08175aa77a9e13492bfaffdb63819760287..dd14425130b7d32ce9ddb7123929a74aa63da93d 100644 --- a/bin/utils/subkey/src/main.rs +++ b/bin/utils/subkey/src/main.rs @@ -16,814 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <https://www.gnu.org/licenses/>. -#![cfg_attr(feature = "bench", feature(test))] -#[cfg(feature = "bench")] -extern crate test; +//! Subkey utility, based on node_runtime. -use bip39::{Language, Mnemonic, MnemonicType}; -use clap::{App, ArgMatches, SubCommand}; -use codec::{Decode, Encode}; -use hex_literal::hex; -use itertools::Itertools; -use libp2p::identity::{ed25519 as libp2p_ed25519, PublicKey}; -use node_primitives::{Balance, Hash, Index, AccountId, Signature}; -use node_runtime::{BalancesCall, Call, Runtime, SignedPayload, UncheckedExtrinsic, VERSION}; -use serde_json::json; -use sp_core::{ - crypto::{set_default_ss58_version, Ss58AddressFormat, Ss58Codec}, - ed25519, sr25519, ecdsa, Pair, Public, H256, hexdisplay::HexDisplay, -}; -use sp_runtime::{traits::{AccountIdConversion, IdentifyAccount, Verify}, generic::Era, ModuleId}; -use std::{ - convert::{TryInto, TryFrom}, io::{stdin, Read}, str::FromStr, path::PathBuf, fs, fmt, -}; +use node_runtime::Runtime; -mod rpc; -mod vanity; - -enum OutputType { - Json, - Text, -} - -impl<'a> TryFrom<&'a str> for OutputType { - type Error = (); - - fn try_from(s: &'a str) -> Result { - match s { - "json" => Ok(OutputType::Json), - "text" => Ok(OutputType::Text), - _ => Err(()), - } - } - -} - -trait Crypto: Sized { - type Pair: Pair; - type Public: Public + Ss58Codec + AsRef<[u8]> + std::hash::Hash; - fn pair_from_suri(suri: &str, password: Option<&str>) -> Self::Pair { - Self::Pair::from_string(suri, password).expect("Invalid phrase") - } - fn ss58_from_pair(pair: &Self::Pair) -> String where - ::Public: PublicT, - { - pair.public().into_runtime().into_account().to_ss58check() - } - fn public_from_pair(pair: &Self::Pair) -> Self::Public { - pair.public() - } - fn print_from_uri( - uri: &str, - password: Option<&str>, - network_override: Option, - output: OutputType, - ) where - ::Public: PublicT, - { - let v = network_override.unwrap_or_default(); - if let Ok((pair, seed)) = Self::Pair::from_phrase(uri, password) { - let public_key = Self::public_from_pair(&pair); - - match output { - OutputType::Json => { - let json = json!({ - "secretPhrase": uri, - "networkId": String::from(v), - "secretSeed": format_seed::(seed), - "publicKey": format_public_key::(public_key.clone()), - "accountId": format_account_id::(public_key), - "ss58Address": Self::ss58_from_pair(&pair), - }); - println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); - }, - OutputType::Text => { - println!("Secret phrase `{}` is account:\n \ - Network ID/version: {}\n \ - Secret seed: {}\n \ - Public key (hex): {}\n \ - Account ID: {}\n \ - SS58 Address: {}", - uri, - String::from(v), - format_seed::(seed), -
format_public_key::(public_key.clone()), - format_account_id::(public_key), - Self::ss58_from_pair(&pair), - ); - }, - } - } else if let Ok((pair, seed)) = Self::Pair::from_string_with_seed(uri, password) { - let public_key = Self::public_from_pair(&pair); - - match output { - OutputType::Json => { - let json = json!({ - "secretKeyUri": uri, - "networkId": String::from(v), - "secretSeed": if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, - "publicKey": format_public_key::(public_key.clone()), - "accountId": format_account_id::(public_key), - "ss58Address": Self::ss58_from_pair(&pair), - }); - println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); - }, - OutputType::Text => { - println!("Secret Key URI `{}` is account:\n \ - Network ID/version: {}\n \ - Secret seed: {}\n \ - Public key (hex): {}\n \ - Account ID: {}\n \ - SS58 Address: {}", - uri, - String::from(v), - if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, - format_public_key::(public_key.clone()), - format_account_id::(public_key), - Self::ss58_from_pair(&pair), - ); - }, - } - } else if let Ok((public_key, v)) = - ::Public::from_string_with_version(uri) - { - let v = network_override.unwrap_or(v); - - match output { - OutputType::Json => { - let json = json!({ - "publicKeyUri": uri, - "networkId": String::from(v), - "publicKey": format_public_key::(public_key.clone()), - "accountId": format_account_id::(public_key.clone()), - "ss58Address": public_key.to_ss58check_with_version(v), - }); - println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); - }, - OutputType::Text => { - println!("Public Key URI `{}` is account:\n \ - Network ID/version: {}\n \ - Public key (hex): {}\n \ - Account ID: {}\n \ - SS58 Address: {}", - uri, - String::from(v), - format_public_key::(public_key.clone()), - format_account_id::(public_key.clone()), - public_key.to_ss58check_with_version(v), - ); - }, - } - } else { - eprintln!("Invalid phrase/URI given"); - } - } -} - -struct Ed25519; - -impl Crypto for Ed25519 { - type Pair = ed25519::Pair; - type Public = ed25519::Public; - - fn pair_from_suri(suri: &str, password_override: Option<&str>) -> Self::Pair { - ed25519::Pair::from_legacy_string(suri, password_override) - } -} - -struct Sr25519; - -impl Crypto for Sr25519 { - type Pair = sr25519::Pair; - type Public = sr25519::Public; -} - -struct Ecdsa; - -impl Crypto for Ecdsa { - type Pair = ecdsa::Pair; - type Public = ecdsa::Public; -} - -type SignatureOf = <::Pair as Pair>::Signature; -type PublicOf = <::Pair as Pair>::Public; -type SeedOf = <::Pair as Pair>::Seed; -type AccountPublic = ::Signer; - -trait SignatureT: AsRef<[u8]> + AsMut<[u8]> + Default { - /// Converts the signature into a runtime account signature, if possible. If not possible, bombs out. - fn into_runtime(self) -> Signature { - panic!("This cryptography isn't supported for this runtime.") - } -} -trait PublicT: Sized + AsRef<[u8]> + Ss58Codec { - /// Converts the public key into a runtime account public key, if possible. If not possible, bombs out. 
- fn into_runtime(self) -> AccountPublic { - panic!("This cryptography isn't supported for this runtime.") - } -} - -impl SignatureT for sr25519::Signature { fn into_runtime(self) -> Signature { self.into() } } -impl SignatureT for ed25519::Signature { fn into_runtime(self) -> Signature { self.into() } } -impl SignatureT for ecdsa::Signature { fn into_runtime(self) -> Signature { self.into() } } -impl PublicT for sr25519::Public { fn into_runtime(self) -> AccountPublic { self.into() } } -impl PublicT for ed25519::Public { fn into_runtime(self) -> AccountPublic { self.into() } } -impl PublicT for ecdsa::Public { fn into_runtime(self) -> AccountPublic { self.into() } } - -fn get_usage() -> String { - let networks = Ss58AddressFormat::all().iter().cloned().map(String::from).join("/"); - let default_network = String::from(Ss58AddressFormat::default()); - format!(" - -e, --ed25519 'Use Ed25519/BIP39 cryptography' - -k, --secp256k1 'Use SECP256k1/ECDSA/BIP39 cryptography' - -s, --sr25519 'Use Schnorr/Ristretto x25519/BIP39 cryptography' - [network] -n, --network 'Specify a network. One of {}. Default is {}' - [password] -p, --password 'The password for the key' - --password-interactive 'You will be prompted for the password for the key.' - [output] -o, --output 'Specify an output format. One of text, json. Default is text.' - ", networks, default_network) -} - -fn get_app<'a, 'b>(usage: &'a str) -> App<'a, 'b> { - App::new("subkey") - .author("Parity Team ") - .about("Utility for generating and restoring with Substrate keys") - .version(env!("CARGO_PKG_VERSION")) - .args_from_usage(usage) - .subcommands(vec![ - SubCommand::with_name("generate") - .about("Generate a random account") - .args_from_usage("[words] -w, --words \ - 'The number of words in the phrase to generate. One of 12 \ - (default), 15, 18, 21 and 24.' - "), - SubCommand::with_name("generate-node-key") - .about("Generate a random node libp2p key, save it to file and print its peer ID") - .args_from_usage("[file] 'Name of file to save secret key to'"), - SubCommand::with_name("inspect") - .about("Gets a public key and a SS58 address from the provided Secret URI") - .args_from_usage("[uri] 'A Key URI to be inspected. May be a secret seed, \ - secret URI (with derivation paths and password), SS58 or public URI. \ - If the value is a file, the file content is used as URI. \ - If not given, you will be prompted for the URI.' - "), - SubCommand::with_name("inspect-node-key") - .about("Print the peer ID corresponding to the node key in the given file") - .args_from_usage("[file] 'Name of file to read the secret key from'"), - SubCommand::with_name("sign") - .about("Sign a message, provided on STDIN, with a given (secret) key") - .args_from_usage(" - -h, --hex 'The message on STDIN is hex-encoded data' - 'The secret key URI. \ - If the value is a file, the file content is used as URI. \ - If not given, you will be prompted for the URI.' - "), - SubCommand::with_name("sign-transaction") - .about("Sign transaction from encoded Call. Returns a signed and encoded \ - UncheckedMortalCompactExtrinsic as hex.") - .args_from_usage(" - -c, --call 'The call, hex-encoded.' - -n, --nonce 'The nonce.' - -p, --password 'The password for the key.' - -h, --prior-block-hash 'The prior block hash, hex-encoded.' - -s, --suri 'The secret key URI.' 
- "), - SubCommand::with_name("transfer") - .about("Author and sign a Node pallet_balances::Transfer transaction with a given (secret) key") - .args_from_usage(" - -g, --genesis 'The genesis hash or a recognized \ - chain identifier (dev, elm, alex).' - 'The signing secret key URI.' - 'The destination account public key URI.' - 'The number of units to transfer.' - 'The signing account's transaction index.' - "), - SubCommand::with_name("vanity") - .about("Generate a seed that provides a vanity address") - .args_from_usage(" - -n, --number 'Number of keys to generate' - 'Desired pattern' - "), - SubCommand::with_name("verify") - .about("Verify a signature for a message, provided on STDIN, with a given \ - (public or secret) key") - .args_from_usage(" - -h, --hex 'The message on STDIN is hex-encoded data' - 'Signature, hex-encoded.' - 'The public or secret key URI. \ - If the value is a file, the file content is used as URI. \ - If not given, you will be prompted for the URI.' - "), - SubCommand::with_name("insert") - .about("Insert a key to the keystore of a node") - .args_from_usage(" - 'The secret key URI. \ - If the value is a file, the file content is used as URI. \ - If not given, you will be prompted for the URI.' - 'Key type, examples: \"gran\", or \"imon\" ' - [node-url] 'Node JSON-RPC endpoint, default \"http:://localhost:9933\"' - "), - SubCommand::with_name("moduleid") - .about("Inspect a module ID address") - .args_from_usage(" - 'The module ID used to derive the account' - ") - ]) -} - -fn main() -> Result<(), Error> { - let usage = get_usage(); - let matches = get_app(&usage).get_matches(); - - if matches.is_present("ed25519") { - return execute::(matches); - } - if matches.is_present("secp256k1") { - return execute::(matches) - } - return execute::(matches) -} - -/// Get `URI` from CLI or prompt the user. -/// -/// `URI` is extracted from `matches` by using `match_name`. -/// -/// If the `URI` given as CLI argument is a file, the file content is taken as `URI`. -/// If no `URI` is given to the CLI, the user is prompted for it. -fn get_uri(match_name: &str, matches: &ArgMatches) -> Result { - let uri = if let Some(uri) = matches.value_of(match_name) { - let file = PathBuf::from(uri); - if file.is_file() { - fs::read_to_string(uri)? - .trim_end() - .into() - } else { - uri.into() - } - } else { - rpassword::read_password_from_tty(Some("URI: "))? - }; - - Ok(uri) -} - -#[derive(derive_more::Display, derive_more::From)] -enum Error { - Static(&'static str), - Io(std::io::Error), - Formatted(String), -} - -impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, f) - } -} - -fn static_err(msg: &'static str) -> Result<(), Error> { - Err(Error::Static(msg)) -} - -fn execute(matches: ArgMatches) -> Result<(), Error> -where - SignatureOf: SignatureT, - PublicOf: PublicT, -{ - let password_interactive = matches.is_present("password-interactive"); - let password = matches.value_of("password"); - - let password = if password.is_some() && password_interactive { - return static_err("`--password` given and `--password-interactive` selected!"); - } else if password_interactive { - Some( - rpassword::read_password_from_tty(Some("Key password: "))? - ) - } else { - password.map(Into::into) - }; - let password = password.as_ref().map(String::as_str); - - let maybe_network: Option = match matches.value_of("network").map(|network| { - network - .try_into() - .map_err(|_| Error::Static("Invalid network name. 
See --help for available networks.")) - }) { - Some(Err(e)) => return Err(e), - Some(Ok(v)) => Some(v), - None => None, - }; - - if let Some(network) = maybe_network { - set_default_ss58_version(network); - } - - let output: OutputType = match matches.value_of("output").map(TryInto::try_into) { - Some(Err(_)) => return Err(Error::Static("Invalid output name. See --help for available outputs.")), - Some(Ok(v)) => v, - None => OutputType::Text, - }; - - match matches.subcommand() { - ("generate", Some(matches)) => { - let mnemonic = generate_mnemonic(matches)?; - C::print_from_uri(mnemonic.phrase(), password, maybe_network, output); - } - ("generate-node-key", Some(matches)) => { - let file = matches.value_of("file").ok_or(Error::Static("Output file name is required"))?; - - let keypair = libp2p_ed25519::Keypair::generate(); - let secret = keypair.secret(); - let peer_id = PublicKey::Ed25519(keypair.public()).into_peer_id(); - - fs::write(file, secret.as_ref())?; - - println!("{}", peer_id); - } - ("inspect", Some(matches)) => { - C::print_from_uri(&get_uri("uri", &matches)?, password, maybe_network, output); - } - ("inspect-node-key", Some(matches)) => { - let file = matches.value_of("file").ok_or(Error::Static("Input file name is required"))?; - - let mut file_content = fs::read(file)?; - let secret = libp2p_ed25519::SecretKey::from_bytes(&mut file_content) - .map_err(|_| Error::Static("Bad node key file"))?; - let keypair = libp2p_ed25519::Keypair::from(secret); - let peer_id = PublicKey::Ed25519(keypair.public()).into_peer_id(); - - println!("{}", peer_id); - } - ("sign", Some(matches)) => { - let suri = get_uri("suri", &matches)?; - let should_decode = matches.is_present("hex"); - - let message = read_message_from_stdin(should_decode)?; - let signature = do_sign::(&suri, message, password)?; - println!("{}", signature); - } - ("verify", Some(matches)) => { - let uri = get_uri("uri", &matches)?; - let should_decode = matches.is_present("hex"); - - let message = read_message_from_stdin(should_decode)?; - let is_valid_signature = do_verify::(matches, &uri, message)?; - if is_valid_signature { - println!("Signature verifies correctly."); - } else { - return static_err("Signature invalid."); - } - } - ("vanity", Some(matches)) => { - let desired: String = matches - .value_of("pattern") - .map(str::to_string) - .unwrap_or_default(); - let result = vanity::generate_key::(&desired)?; - let formated_seed = format_seed::(result.seed); - C::print_from_uri(&formated_seed, None, maybe_network, output); - } - ("transfer", Some(matches)) => { - let signer = read_pair::(matches.value_of("from"), password)?; - let index = read_required_parameter::(matches, "index")?; - let genesis_hash = read_genesis_hash(matches)?; - - let to: AccountId = read_account_id(matches.value_of("to")); - let amount = read_required_parameter::(matches, "amount")?; - let function = Call::Balances(BalancesCall::transfer(to.into(), amount)); - - let extrinsic = create_extrinsic::(function, index, signer, genesis_hash); - - print_extrinsic(extrinsic); - } - ("sign-transaction", Some(matches)) => { - let signer = read_pair::(matches.value_of("suri"), password)?; - let index = read_required_parameter::(matches, "nonce")?; - let genesis_hash = read_genesis_hash(matches)?; - - let call = matches.value_of("call").expect("call is required; qed"); - let function: Call = hex::decode(&call) - .ok() - .and_then(|x| Decode::decode(&mut &x[..]).ok()) - .unwrap(); - - let extrinsic = create_extrinsic::(function, index, signer, genesis_hash); - 
- print_extrinsic(extrinsic); - } - ("insert", Some(matches)) => { - let suri = get_uri("suri", &matches)?; - let pair = read_pair::(Some(&suri), password)?; - let node_url = matches.value_of("node-url").unwrap_or("http://localhost:9933"); - let key_type = matches.value_of("key-type").ok_or(Error::Static("Key type id is required"))?; - - // Just checking - let _key_type_id = sp_core::crypto::KeyTypeId::try_from(key_type) - .map_err(|_| Error::Static("Cannot convert argument to keytype: argument should be 4-character string"))?; - - let rpc = rpc::RpcClient::new(node_url.to_string()); - - rpc.insert_key( - key_type.to_string(), - suri, - sp_core::Bytes(pair.public().as_ref().to_vec()), - ); - } - ("moduleid", Some(matches)) => { - let id = get_uri("id", &matches)?; - if id.len() != 8 { - Err("a module id must be a string of 8 characters")? - } - - let id_fixed_array: [u8; 8] = id.as_bytes().try_into() - .map_err(|_| Error::Static("Cannot convert argument to moduleid: argument should be 8-character string"))?; - - let account_id: AccountId = ModuleId(id_fixed_array).into_account(); - let v = maybe_network.unwrap_or(Ss58AddressFormat::SubstrateAccount); - - C::print_from_uri(&account_id.to_ss58check_with_version(v), password, maybe_network, output); - } - _ => print_usage(&matches), - } - - Ok(()) -} - -/// Creates a new randomly generated mnemonic phrase. -fn generate_mnemonic(matches: &ArgMatches) -> Result { - let words = match matches.value_of("words") { - Some(words) => { - let num = usize::from_str(words).map_err(|_| Error::Static("Invalid number given for --words"))?; - MnemonicType::for_word_count(num) - .map_err(|_| Error::Static("Invalid number of words given for phrase: must be 12/15/18/21/24"))? - }, - None => MnemonicType::Words12, - }; - Ok(Mnemonic::new(words, Language::English)) -} - -fn do_sign(suri: &str, message: Vec, password: Option<&str>) -> Result -where - SignatureOf: SignatureT, - PublicOf: PublicT, -{ - let pair = read_pair::(Some(suri), password)?; - let signature = pair.sign(&message); - Ok(format_signature::(&signature)) -} - -fn do_verify(matches: &ArgMatches, uri: &str, message: Vec) -> Result -where - SignatureOf: SignatureT, - PublicOf: PublicT, -{ - - let signature = read_signature::(matches)?; - let pubkey = read_public_key::(Some(uri)); - Ok(<::Pair as Pair>::verify(&signature, &message, &pubkey)) -} - -fn decode_hex>(message: T) -> Result, Error> { - hex::decode(message).map_err(|e| Error::Formatted(format!("Invalid hex ({})", e))) -} - -fn read_message_from_stdin(should_decode: bool) -> Result, Error> { - let mut message = vec![]; - stdin() - .lock() - .read_to_end(&mut message)?; - if should_decode { - message = decode_hex(&message)?; - } - Ok(message) -} - -fn read_required_parameter(matches: &ArgMatches, name: &str) -> Result where - ::Err: std::fmt::Debug, -{ - let str_value = matches - .value_of(name) - .expect("parameter is required; thus it can't be None; qed"); - str::parse::(str_value).map_err(|_| - Error::Formatted(format!("Invalid `{}' parameter; expecting an integer.", name)) - ) -} - -fn read_genesis_hash(matches: &ArgMatches) -> Result { - let genesis_hash: Hash = match matches.value_of("genesis").unwrap_or("alex") { - "elm" => hex!["10c08714a10c7da78f40a60f6f732cf0dba97acfb5e2035445b032386157d5c3"].into(), - "alex" => hex!["dcd1346701ca8396496e52aa2785b1748deb6db09551b72159dcb3e08991025b"].into(), - h => Decode::decode(&mut &decode_hex(h)?[..]) - .expect("Invalid genesis hash or unrecognized chain identifier"), - }; - println!( - "Using a 
genesis hash of {}", - HexDisplay::from(&genesis_hash.as_ref()) - ); - Ok(genesis_hash) -} - -fn read_signature(matches: &ArgMatches) -> Result, Error> -where - SignatureOf: SignatureT, - PublicOf: PublicT, -{ - let sig_data = matches - .value_of("sig") - .expect("signature parameter is required; thus it can't be None; qed"); - let mut signature = <::Pair as Pair>::Signature::default(); - let sig_data = decode_hex(sig_data)?; - if sig_data.len() != signature.as_ref().len() { - return Err(Error::Formatted(format!( - "signature has an invalid length. read {} bytes, expected {} bytes", - sig_data.len(), - signature.as_ref().len(), - ))); - } - signature.as_mut().copy_from_slice(&sig_data); - Ok(signature) -} - -fn read_public_key(matched_uri: Option<&str>) -> PublicOf -where - PublicOf: PublicT, -{ - let uri = matched_uri.expect("parameter is required; thus it can't be None; qed"); - let uri = if uri.starts_with("0x") { - &uri[2..] - } else { - uri - }; - if let Ok(pubkey_vec) = hex::decode(uri) { - ::Public::from_slice(pubkey_vec.as_slice()) - } else { - ::Public::from_string(uri) - .ok() - .expect("Invalid URI; expecting either a secret URI or a public URI.") - } -} - -fn read_account_id(matched_uri: Option<&str>) -> AccountId { - let uri = matched_uri.expect("parameter is required; thus it can't be None; qed"); - let uri = if uri.starts_with("0x") { - &uri[2..] - } else { - uri - }; - if let Ok(data_vec) = hex::decode(uri) { - AccountId::try_from(data_vec.as_slice()) - .expect("Invalid hex length for account ID; should be 32 bytes") - } else { - AccountId::from_ss58check(uri).ok() - .expect("Invalid SS58-check address given for account ID.") - } -} - -fn read_pair( - matched_suri: Option<&str>, - password: Option<&str>, -) -> Result<::Pair, Error> where - SignatureOf: SignatureT, - PublicOf: PublicT, -{ - let suri = matched_suri.ok_or(Error::Static("parameter is required; thus it can't be None; qed"))?; - Ok(C::pair_from_suri(suri, password)) -} - -fn format_signature(signature: &SignatureOf) -> String { - format!("{}", HexDisplay::from(&signature.as_ref())) -} - -fn format_seed(seed: SeedOf) -> String { - format!("0x{}", HexDisplay::from(&seed.as_ref())) -} - -fn format_public_key(public_key: PublicOf) -> String { - format!("0x{}", HexDisplay::from(&public_key.as_ref())) -} - -fn format_account_id(public_key: PublicOf) -> String where - PublicOf: PublicT, -{ - format!("0x{}", HexDisplay::from(&public_key.into_runtime().into_account().as_ref())) -} - -fn create_extrinsic( - function: Call, - index: Index, - signer: C::Pair, - genesis_hash: H256, -) -> UncheckedExtrinsic where - PublicOf: PublicT, - SignatureOf: SignatureT, -{ - let extra = |i: Index, f: Balance| { - ( - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(Era::Immortal), - frame_system::CheckNonce::::from(i), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(f), - ) - }; - let raw_payload = SignedPayload::from_raw( - function, - extra(index, 0), - ( - VERSION.spec_version, - VERSION.transaction_version, - genesis_hash, - genesis_hash, - (), - (), - (), - ), - ); - let signature = raw_payload.using_encoded(|payload| signer.sign(payload)).into_runtime(); - let signer = signer.public().into_runtime(); - let (function, extra, _) = raw_payload.deconstruct(); - - UncheckedExtrinsic::new_signed( - function, - signer.into_account().into(), - signature, - extra, - ) -} - -fn 
print_extrinsic(extrinsic: UncheckedExtrinsic) { - println!("0x{}", HexDisplay::from(&extrinsic.encode())); -} - -fn print_usage(matches: &ArgMatches) { - println!("{}", matches.usage()); -} - -#[cfg(test)] -mod tests { - use super::*; - - fn test_generate_sign_verify() - where - SignatureOf: SignatureT, - PublicOf: PublicT, - { - let usage = get_usage(); - let app = get_app(&usage); - let password = None; - - // Generate public key and seed. - let arg_vec = vec!["subkey", "generate"]; - - let matches = app.clone().get_matches_from(arg_vec); - let matches = matches.subcommand().1.unwrap(); - let mnemonic = generate_mnemonic(matches).expect("generate failed"); - - let (pair, seed) = - <::Pair as Pair>::from_phrase(mnemonic.phrase(), password) - .unwrap(); - let public_key = CryptoType::public_from_pair(&pair); - let public_key = format_public_key::(public_key); - let seed = format_seed::(seed); - let message = "Blah Blah\n".as_bytes().to_vec(); - - let signature = do_sign::(&seed, message.clone(), password).expect("signing failed"); - - // Verify the previous signature. - let arg_vec = vec!["subkey", "verify", &signature[..], &public_key[..]]; - - let matches = get_app(&usage).get_matches_from(arg_vec); - let matches = matches.subcommand().1.unwrap(); - - assert!(do_verify::(matches, &public_key, message).expect("verify failed")); - } - - #[test] - fn generate_sign_verify_should_work_for_ed25519() { - test_generate_sign_verify::(); - } - - #[test] - fn generate_sign_verify_should_work_for_sr25519() { - test_generate_sign_verify::(); - } - - #[test] - fn should_work() { - let s = "0123456789012345678901234567890123456789012345678901234567890123"; - - let d1: Hash = hex::decode(s) - .ok() - .and_then(|x| Decode::decode(&mut &x[..]).ok()) - .unwrap(); - - let d2: Hash = { - let mut gh: [u8; 32] = Default::default(); - gh.copy_from_slice(hex::decode(s).unwrap().as_ref()); - Hash::from(gh) - }; - - assert_eq!(d1, d2); - } +fn main() -> Result<(), sc_cli::Error> { + subkey::run::() } diff --git a/bin/utils/subkey/src/rpc.rs b/bin/utils/subkey/src/rpc.rs deleted file mode 100644 index e24cf50dc45026615891782cc2440a3eda5749a0..0000000000000000000000000000000000000000 --- a/bin/utils/subkey/src/rpc.rs +++ /dev/null @@ -1,51 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! 
Helper to run commands against current node RPC - -use futures::Future; -use hyper::rt; -use node_primitives::Hash; -use sc_rpc::author::AuthorClient; -use jsonrpc_core_client::transports::http; -use sp_core::Bytes; - -pub struct RpcClient { url: String } - -impl RpcClient { - pub fn new(url: String) -> Self { Self { url } } - - pub fn insert_key( - &self, - key_type: String, - suri: String, - public: Bytes, - ) { - let url = self.url.clone(); - - rt::run( - http::connect(&url) - .and_then(|client: AuthorClient| { - client.insert_key(key_type, suri, public).map(|_| ()) - }) - .map_err(|e| { - eprintln!("Error inserting key: {:?}", e); - }) - ); - } -} diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index 45601771a4033d0498204931aba588c681fd3145..1b8240a4b002683cc2400b7a4fda3761cfa066dc 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-client-api" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -8,42 +8,43 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate client interfaces." documentation = "https://docs.rs/sc-client-api" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -derive_more = { version = "0.99.2" } -sc-executor = { version = "0.8.0-rc5", path = "../executor" } -sp-externalities = { version = "0.8.0-rc5", path = "../../primitives/externalities" } -fnv = { version = "1.0.6" } -futures = { version = "0.3.1" } +sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } +derive_more = "0.99.2" +sc-executor = { version = "0.8.0", path = "../executor" } +sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } +fnv = "1.0.6" +futures = "0.3.1" hash-db = { version = "0.15.2", default-features = false } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -hex-literal = { version = "0.2.1" } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } +sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +hex-literal = "0.3.1" +sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } +sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } kvdb = "0.7.0" -log = { version = "0.4.8" } +log = "0.4.8" parking_lot = "0.10.0" lazy_static = "1.4.0" -sp-database = { version = "2.0.0-rc5", path = "../../primitives/database" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-version = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/version" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } -sc-telemetry = { 
version = "2.0.0-rc5", path = "../telemetry" } -sp-trie = { version = "2.0.0-rc5", path = "../../primitives/trie" } -sp-storage = { version = "2.0.0-rc5", path = "../../primitives/storage" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transaction-pool" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc5", path = "../../utils/prometheus" } +sp-database = { version = "2.0.0", path = "../../primitives/database" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-version = { version = "2.0.0", default-features = false, path = "../../primitives/version" } +sp-api = { version = "2.0.0", path = "../../primitives/api" } +sp-utils = { version = "2.0.0", path = "../../primitives/utils" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } +sc-telemetry = { version = "2.0.0", path = "../telemetry" } +sp-trie = { version = "2.0.0", path = "../../primitives/trie" } +sp-storage = { version = "2.0.0", path = "../../primitives/storage" } +sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0", path = "../../utils/prometheus" } [dev-dependencies] -kvdb-memorydb = "0.6.0" -sp-test-primitives = { version = "2.0.0-rc5", path = "../../primitives/test-primitives" } -substrate-test-runtime = { version = "2.0.0-rc5", path = "../../test-utils/runtime" } +kvdb-memorydb = "0.7.0" +sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" } +substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } diff --git a/client/api/README.md b/client/api/README.md new file mode 100644 index 0000000000000000000000000000000000000000..142f5b32dd9a8de7c083aac5260ac6677042c3ad --- /dev/null +++ b/client/api/README.md @@ -0,0 +1,3 @@ +Substrate client interfaces. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index efc5ca4ee8ca054a9944ecac2c56a84b63f812bb..47fec977f5e827361b6190bce8fbd49f045b61be 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -536,3 +536,21 @@ pub fn changes_tries_state_at_block<'a, Block: BlockT>( None => Ok(None), } } + +/// Provide CHT roots. These are stored on a light client and generated dynamically on a full +/// client. +pub trait ProvideChtRoots { + /// Get headers CHT root for given block. Returns None if the block is not a part of any CHT. + fn header_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> sp_blockchain::Result>; + + /// Get changes trie CHT root for given block. Returns None if the block is not a part of any CHT. 
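// The CHT accessors are being split out of the light-client `Storage` trait (see
// `client/api/src/light.rs` below) into this standalone `ProvideChtRoots` trait. A hedged
// sketch of a trivial implementation for a backend that keeps every header and therefore
// never answers from a CHT; the type name `ArchiveBlockchain` is hypothetical, and the
// generic parameters shown follow the in-memory implementation further down in this diff:
//
//     impl<Block: BlockT> ProvideChtRoots<Block> for ArchiveBlockchain<Block> {
//         fn header_cht_root(
//             &self,
//             _cht_size: NumberFor<Block>,
//             _block: NumberFor<Block>,
//         ) -> sp_blockchain::Result<Option<Block::Hash>> {
//             Ok(None) // no block is covered by a CHT in this backend
//         }
//
//         fn changes_trie_cht_root(
//             &self,
//             _cht_size: NumberFor<Block>,
//             _block: NumberFor<Block>,
//         ) -> sp_blockchain::Result<Option<Block::Hash>> {
//             Ok(None)
//         }
//     }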
+ fn changes_trie_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> sp_blockchain::Result>; +} diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 306c3c2b2f10cc8e6d121817b153b8985953ea5b..ded030fb8046f6279d384d1b9e395e3b8345d6fd 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -35,7 +35,7 @@ use sp_state_machine::{ use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata}; use crate::{ - backend::{self, NewBlockState}, + backend::{self, NewBlockState, ProvideChtRoots}, blockchain::{ self, BlockStatus, HeaderBackend, well_known_cache_keys::Id as CacheKeyId }, @@ -447,6 +447,16 @@ impl light::Storage for Blockchain Blockchain::finalize_header(self, id, None) } + fn cache(&self) -> Option>> { + None + } + + fn usage_info(&self) -> Option { + None + } +} + +impl ProvideChtRoots for Blockchain { fn header_cht_root( &self, _cht_size: NumberFor, @@ -466,14 +476,6 @@ impl light::Storage for Blockchain .ok_or_else(|| sp_blockchain::Error::Backend(format!("Changes trie CHT for block {} not exists", block))) .map(Some) } - - fn cache(&self) -> Option>> { - None - } - - fn usage_info(&self) -> Option { - None - } } /// In-memory operation. diff --git a/client/api/src/light.rs b/client/api/src/light.rs index b359c1149eea645e27f584122dfaf328c168ac47..144851dac0075f9c2a31f01225aaee5bb9504178 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -32,7 +32,7 @@ use sp_blockchain::{ HeaderMetadata, well_known_cache_keys, HeaderBackend, Cache as BlockchainCache, Error as ClientError, Result as ClientResult, }; -use crate::{backend::{AuxStore, NewBlockState}, UsageInfo}; +use crate::{backend::{AuxStore, NewBlockState}, UsageInfo, ProvideChtRoots}; /// Remote call request. #[derive(Clone, Debug, PartialEq, Eq, Hash)] @@ -232,7 +232,9 @@ pub trait FetchChecker: Send + Sync { /// Light client blockchain storage. -pub trait Storage: AuxStore + HeaderBackend + HeaderMetadata { +pub trait Storage: AuxStore + HeaderBackend + + HeaderMetadata + ProvideChtRoots +{ /// Store new header. Should refuse to revert any finalized blocks. /// /// Takes new authorities, the leaf state of the new block, and @@ -254,20 +256,6 @@ pub trait Storage: AuxStore + HeaderBackend + HeaderMetada /// Get last finalized header. fn last_finalized(&self) -> ClientResult; - /// Get headers CHT root for given block. Returns None if the block is not pruned (not a part of any CHT). - fn header_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult>; - - /// Get changes trie CHT root for given block. Returns None if the block is not pruned (not a part of any CHT). - fn changes_trie_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult>; - /// Get storage cache. fn cache(&self) -> Option>>; diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 3f3e5b5589473a0df6819de0e45034656eed5175..d611554edc123687cebf22f6fe2739ca7bf88cd7 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-authority-discovery" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -8,6 +8,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate authority discovery." 
+readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -19,25 +20,26 @@ prost-build = "0.6.1" bytes = "0.5.0" codec = { package = "parity-scale-codec", default-features = false, version = "1.3.4" } derive_more = "0.99.2" +either = "1.5.3" futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.23.0", default-features = false, features = ["kad"] } +libp2p = { version = "0.28.1", default-features = false, features = ["kad"] } log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc5"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} prost = "0.6.1" rand = "0.7.2" -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sc-keystore = { version = "2.0.0-rc5", path = "../keystore" } -sc-network = { version = "0.8.0-rc5", path = "../network" } +sc-client-api = { version = "2.0.0", path = "../api" } +sc-keystore = { version = "2.0.0", path = "../keystore" } +sc-network = { version = "0.8.0", path = "../network" } serde_json = "1.0.41" -sp-authority-discovery = { version = "2.0.0-rc5", path = "../../primitives/authority-discovery" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } +sp-authority-discovery = { version = "2.0.0", path = "../../primitives/authority-discovery" } +sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-api = { version = "2.0.0", path = "../../primitives/api" } [dev-dependencies] -env_logger = "0.7.0" quickcheck = "0.9.0" -sc-peerset = { version = "2.0.0-rc5", path = "../peerset" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client"} +sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } +sc-peerset = { version = "2.0.0", path = "../peerset" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client"} diff --git a/client/authority-discovery/README.md b/client/authority-discovery/README.md new file mode 100644 index 0000000000000000000000000000000000000000..54c51d5ba04f4c6f8a0fa7e01f4ba094737899c6 --- /dev/null +++ b/client/authority-discovery/README.md @@ -0,0 +1,9 @@ +Substrate authority discovery. + +This crate enables Substrate authorities to discover and directly connect to +other authorities. It is split into two components the [`Worker`] and the +[`Service`]. + +See [`Worker`] and [`Service`] for more documentation. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/authority-discovery/src/error.rs b/client/authority-discovery/src/error.rs index b1358485c37eeb25edd77d1c5273a8f1316dcaf2..48bcdf33114b11d33e9331a8b7c221350b8d105b 100644 --- a/client/authority-discovery/src/error.rs +++ b/client/authority-discovery/src/error.rs @@ -34,10 +34,8 @@ pub enum Error { HashingAuthorityId(libp2p::core::multiaddr::multihash::EncodeError), /// Failed calling into the Substrate runtime. CallingRuntime(sp_blockchain::Error), - /// From the Dht we only get the hashed authority id. 
In order to retrieve the actual authority id and to ensure it - /// is actually an authority, we match the hash against the hash of the authority id of all other authorities. This - /// error is the result of the above failing. - MatchingHashedAuthorityIdWithAuthorityId, + /// Received a dht record with a key that does not match any in-flight awaited keys. + ReceivingUnexpectedRecord, /// Failed to set the authority discovery peerset priority group in the peerset module. SettingPeersetPriorityGroup(String), /// Failed to encode a protobuf payload. diff --git a/client/authority-discovery/src/service.rs b/client/authority-discovery/src/service.rs index 01fb7134fb5d1b6958b13e464fa60e4a2b622331..ed0205d262fc60934c50bb04f0a17dd8f50c2e5e 100644 --- a/client/authority-discovery/src/service.rs +++ b/client/authority-discovery/src/service.rs @@ -37,12 +37,18 @@ impl Service { } } - /// Get the addresses for the given [`AuthorityId`] from the local address cache. + /// Get the addresses for the given [`AuthorityId`] from the local address + /// cache. /// - /// Returns `None` if no entry was present or connection to the [`crate::Worker`] failed. + /// Returns `None` if no entry was present or connection to the + /// [`crate::Worker`] failed. /// - /// [`Multiaddr`]s returned always include a [`libp2p::core::multiaddr:Protocol::P2p`] - /// component. + /// [`Multiaddr`]s returned always include a [`PeerId`] via a + /// [`libp2p::core::multiaddr:Protocol::P2p`] component. [`Multiaddr`]s + /// might differ in their [`PeerId`], e.g. when each [`Multiaddr`] + /// represents a different sentry node. This might change once support for + /// sentry nodes is removed (see + /// https://github.com/paritytech/substrate/issues/6845). pub async fn get_addresses_by_authority_id(&mut self, authority: AuthorityId) -> Option> { let (tx, rx) = oneshot::channel(); @@ -54,9 +60,11 @@ impl Service { rx.await.ok().flatten() } - /// Get the [`AuthorityId`] for the given [`PeerId`] from the local address cache. + /// Get the [`AuthorityId`] for the given [`PeerId`] from the local address + /// cache. /// - /// Returns `None` if no entry was present or connection to the [`crate::Worker`] failed. + /// Returns `None` if no entry was present or connection to the + /// [`crate::Worker`] failed. pub async fn get_authority_id_by_peer_id(&mut self, peer_id: PeerId) -> Option { let (tx, rx) = oneshot::channel(); diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index 16f19489f94b73047920e0a5592cbe5b5afb300e..ff4d12dadd98871a23a94a4a2925b069dea39077 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -30,10 +30,12 @@ use futures_timer::Delay; use addr_cache::AddrCache; use codec::Decode; -use libp2p::core::multiaddr; +use either::Either; +use libp2p::{core::multiaddr, multihash::Multihash}; use log::{debug, error, log_enabled}; use prometheus_endpoint::{Counter, CounterVec, Gauge, Opts, U64, register}; use prost::Message; +use rand::{seq::SliceRandom, thread_rng}; use sc_client_api::blockchain::HeaderBackend; use sc_network::{ config::MultiaddrWithPeerId, @@ -66,6 +68,12 @@ const LIBP2P_KADEMLIA_BOOTSTRAP_TIME: Duration = Duration::from_secs(30); /// discovery module. const AUTHORITIES_PRIORITY_GROUP_NAME: &'static str = "authorities"; +/// Maximum number of addresses cached per authority. Additional addresses are discarded. 
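// The two constants introduced below bound the worker's resource use: `MAX_ADDRESSES_PER_AUTHORITY`
// caps how many `Multiaddr`s are cached per authority (enforced via `.take(...)` when a DHT
// record is processed), while `MAX_IN_FLIGHT_LOOKUPS` throttles concurrent DHT queries. The
// throttling loop, condensed from `start_new_lookups` further down (metrics bookkeeping
// omitted):
//
//     while self.in_flight_lookups.len() < MAX_IN_FLIGHT_LOOKUPS {
//         let authority_id = match self.pending_lookups.pop() {
//             Some(authority) => authority,
//             None => return,                       // queue drained until the next refill
//         };
//         let hash = hash_authority_id(authority_id.as_ref());
//         self.network.get_value(&hash);            // fire the DHT lookup
//         self.in_flight_lookups.insert(hash, authority_id);
//     }
//
// Completed or failed lookups are removed from `in_flight_lookups` as their `ValueFound` /
// `ValueNotFound` events arrive, freeing budget for the next pending authority.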
+const MAX_ADDRESSES_PER_AUTHORITY: usize = 10; + +/// Maximum number of in-flight DHT lookups at any given point in time. +const MAX_IN_FLIGHT_LOOKUPS: usize = 8; + /// Role an authority discovery module can run as. pub enum Role { /// Actual authority as well as a reference to its key store. @@ -95,7 +103,7 @@ pub enum Role { /// /// 2. **Discovers other authorities** /// -/// 1. Retrieves the current set of authorities. +/// 1. Retrieves the current and next set of authorities. /// /// 2. Starts DHT queries for the ids of the authorities. /// @@ -133,12 +141,17 @@ where /// Interval to be proactive, publishing own addresses. publish_interval: Interval, - /// Interval on which to query for addresses of other authorities. + /// Interval at which to request addresses of authorities, refilling the pending lookups queue. query_interval: Interval, /// Interval on which to set the peerset priority group to a new random /// set of addresses. priority_group_set_interval: Interval, + /// Queue of throttled lookups pending to be passed to the network. + pending_lookups: Vec, + /// Set of in-flight lookups. + in_flight_lookups: HashMap, + addr_cache: addr_cache::AddrCache, metrics: Option, @@ -179,8 +192,8 @@ where Duration::from_secs(12 * 60 * 60), ); - // External addresses of other authorities can change at any given point in time. The - // interval on which to query for external addresses of other authorities is a trade off + // External addresses of remote authorities can change at any given point in time. The + // interval on which to trigger new queries for the current authorities is a trade off // between efficiency and performance. let query_interval_start = Instant::now() + LIBP2P_KADEMLIA_BOOTSTRAP_TIME; let query_interval_duration = Duration::from_secs(10 * 60); @@ -189,9 +202,9 @@ where // Querying 500 [`AuthorityId`]s takes ~1m on the Kusama DHT (10th of August 2020) when // comparing `authority_discovery_authority_addresses_requested_total` and // `authority_discovery_dht_event_received`. With that in mind set the peerset priority - // group on the same interval as the [`query_interval`] above, just delayed by 2 minutes. + // group on the same interval as the [`query_interval`] above, just delayed by 5 minutes. let priority_group_set_interval = interval_at( - query_interval_start + Duration::from_secs(2 * 60), + query_interval_start + Duration::from_secs(5 * 60), query_interval_duration, ); @@ -225,6 +238,8 @@ where publish_interval, query_interval, priority_group_set_interval, + pending_lookups: Vec::new(), + in_flight_lookups: HashMap::new(), addr_cache, role, metrics, @@ -232,6 +247,26 @@ where } } + fn addresses_to_publish(&self) -> impl ExactSizeIterator { + match &self.sentry_nodes { + Some(addrs) => Either::Left(addrs.clone().into_iter()), + None => { + let peer_id: Multihash = self.network.local_peer_id().into(); + Either::Right( + self.network.external_addresses() + .into_iter() + .map(move |a| { + if a.iter().any(|p| matches!(p, multiaddr::Protocol::P2p(_))) { + a + } else { + a.with(multiaddr::Protocol::P2p(peer_id.clone())) + } + }), + ) + } + } + } + /// Publish either our own or if specified the public addresses of our sentry nodes. 
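// `publish_ext_addresses` now delegates address assembly to the `addresses_to_publish` helper
// above: every published `Multiaddr` gets this node's `/p2p/<peer id>` component appended
// unless it already carries one. For example, an external address reported as
//
//     /ip6/2001:db8::/tcp/30333
//
// is published as
//
//     /ip6/2001:db8::/tcp/30333/p2p/<local peer id>
//
// while an address that already ends in a `/p2p/...` component is published unchanged (see
// `addresses_to_publish_adds_p2p` and `addresses_to_publish_respects_existing_p2p_protocol`
// in the tests below).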
fn publish_ext_addresses(&mut self) -> Result<()> { let key_store = match &self.role { @@ -242,29 +277,17 @@ where Role::Sentry => return Ok(()), }; - if let Some(metrics) = &self.metrics { - metrics.publish.inc() - } - - let addresses: Vec<_> = match &self.sentry_nodes { - Some(addrs) => addrs.clone().into_iter() - .map(|a| a.to_vec()) - .collect(), - None => self.network.external_addresses() - .into_iter() - .map(|a| a.with(multiaddr::Protocol::P2p( - self.network.local_peer_id().into(), - ))) - .map(|a| a.to_vec()) - .collect(), - }; + let addresses = self.addresses_to_publish(); if let Some(metrics) = &self.metrics { - metrics.amount_last_published.set(addresses.len() as u64); + metrics.publish.inc(); + metrics.amount_addresses_last_published.set( + addresses.len().try_into().unwrap_or(std::u64::MAX), + ); } let mut serialized_addresses = vec![]; - schema::AuthorityAddresses { addresses } + schema::AuthorityAddresses { addresses: addresses.map(|a| a.to_vec()).collect() } .encode(&mut serialized_addresses) .map_err(Error::EncodingProto)?; @@ -304,15 +327,9 @@ where Ok(()) } - fn request_addresses_of_others(&mut self) -> Result<()> { + fn refill_pending_lookups_queue(&mut self) -> Result<()> { let id = BlockId::hash(self.client.info().best_hash); - let authorities = self - .client - .runtime_api() - .authorities(&id) - .map_err(Error::CallingRuntime)?; - let local_keys = match &self.role { Role::Authority(key_store) => { key_store.read() @@ -323,21 +340,52 @@ where Role::Sentry => HashSet::new(), }; - for authority_id in authorities.iter() { - // Make sure we don't look up our own keys. - if !local_keys.contains(authority_id.as_ref()) { - if let Some(metrics) = &self.metrics { - metrics.request.inc(); - } + let mut authorities = self + .client + .runtime_api() + .authorities(&id) + .map_err(Error::CallingRuntime)? + .into_iter() + .filter(|id| !local_keys.contains(id.as_ref())) + .collect(); - self.network - .get_value(&hash_authority_id(authority_id.as_ref())); - } + self.addr_cache.retain_ids(&authorities); + + authorities.shuffle(&mut thread_rng()); + self.pending_lookups = authorities; + // Ignore all still in-flight lookups. Those that are still in-flight are likely stalled as + // query interval ticks are far enough apart for all lookups to succeed. + self.in_flight_lookups.clear(); + + if let Some(metrics) = &self.metrics { + metrics.requests_pending.set( + self.pending_lookups.len().try_into().unwrap_or(std::u64::MAX), + ); } Ok(()) } + fn start_new_lookups(&mut self) { + while self.in_flight_lookups.len() < MAX_IN_FLIGHT_LOOKUPS { + let authority_id = match self.pending_lookups.pop() { + Some(authority) => authority, + None => return, + }; + let hash = hash_authority_id(authority_id.as_ref()); + self.network + .get_value(&hash); + self.in_flight_lookups.insert(hash, authority_id); + + if let Some(metrics) = &self.metrics { + metrics.requests.inc(); + metrics.requests_pending.set( + self.pending_lookups.len().try_into().unwrap_or(std::u64::MAX), + ); + } + } + } + /// Handle incoming Dht events. 
/// /// Returns either: @@ -375,10 +423,17 @@ where metrics.dht_event_received.with_label_values(&["value_not_found"]).inc(); } - debug!( - target: LOG_TARGET, - "Value for hash '{:?}' not found on Dht.", hash - ) + if self.in_flight_lookups.remove(&hash).is_some() { + debug!( + target: LOG_TARGET, + "Value for hash '{:?}' not found on Dht.", hash + ) + } else { + debug!( + target: LOG_TARGET, + "Received 'ValueNotFound' for unexpected hash '{:?}'.", hash + ) + } }, Some(DhtEvent::ValuePut(hash)) => { if let Some(metrics) = &self.metrics { @@ -424,23 +479,9 @@ where } })?.ok_or(Error::ReceivingDhtValueFoundEventWithNoRecords)?; - let authorities = { - let block_id = BlockId::hash(self.client.info().best_hash); - // From the Dht we only get the hashed authority id. In order to retrieve the actual - // authority id and to ensure it is actually an authority, we match the hash against the - // hash of the authority id of all other authorities. - let authorities = self.client.runtime_api().authorities(&block_id)?; - self.addr_cache.retain_ids(&authorities); - authorities - .into_iter() - .map(|id| (hash_authority_id(id.as_ref()), id)) - .collect::>() - }; - - // Check if the event origins from an authority in the current authority set. - let authority_id: &AuthorityId = authorities - .get(&remote_key) - .ok_or(Error::MatchingHashedAuthorityIdWithAuthorityId)?; + let authority_id: AuthorityId = self.in_flight_lookups + .remove(&remote_key) + .ok_or(Error::ReceivingUnexpectedRecord)?; let local_peer_id = self.network.local_peer_id(); @@ -453,7 +494,7 @@ where let signature = AuthoritySignature::decode(&mut &signature[..]) .map_err(Error::EncodingDecodingScale)?; - if !AuthorityPair::verify(&signature, &addresses, authority_id) { + if !AuthorityPair::verify(&signature, &addresses, &authority_id) { return Err(Error::VerifyingDhtPayload); } @@ -489,10 +530,11 @@ where false // `protocol` is not a [`Protocol::P2p`], let's keep looking. })) + .take(MAX_ADDRESSES_PER_AUTHORITY) .collect(); if !remote_addresses.is_empty() { - self.addr_cache.insert(authority_id.clone(), remote_addresses); + self.addr_cache.insert(authority_id, remote_addresses); if let Some(metrics) = &self.metrics { metrics.known_authorities_count.set( self.addr_cache.num_ids().try_into().unwrap_or(std::u64::MAX) @@ -503,12 +545,12 @@ where Ok(()) } - /// Retrieve our public keys within the current authority set. + /// Retrieve our public keys within the current and next authority set. // // A node might have multiple authority discovery keys within its keystore, e.g. an old one and - // one for the upcoming session. In addition it could be participating in the current authority - // set with two keys. The function does not return all of the local authority discovery public - // keys, but only the ones intersecting with the current authority set. + // one for the upcoming session. In addition it could be participating in the current and (/ or) + // next authority set with two keys. The function does not return all of the local authority + // discovery public keys, but only the ones intersecting with the current or next authority set. fn get_own_public_keys_within_authority_set( key_store: &BareCryptoStorePtr, client: &Client, @@ -519,14 +561,14 @@ where .collect::>(); let id = BlockId::hash(client.info().best_hash); - let current_authorities = client.runtime_api() + let authorities = client.runtime_api() .authorities(&id) .map_err(Error::CallingRuntime)? 
.into_iter() .map(std::convert::Into::into) .collect::>(); - let intersection = local_pub_keys.intersection(¤t_authorities) + let intersection = local_pub_keys.intersection(&authorities) .cloned() .map(std::convert::Into::into) .collect(); @@ -599,15 +641,15 @@ where } } - // Request addresses of authorities. + // Request addresses of authorities, refilling the pending lookups queue. if let Poll::Ready(_) = self.query_interval.poll_next_unpin(cx) { // Register waker of underlying task for next interval. while let Poll::Ready(_) = self.query_interval.poll_next_unpin(cx) {} - if let Err(e) = self.request_addresses_of_others() { + if let Err(e) = self.refill_pending_lookups_queue() { error!( target: LOG_TARGET, - "Failed to request addresses of authorities: {:?}", e, + "Failed to refill pending lookups queue: {:?}", e, ); } } @@ -641,6 +683,8 @@ where } } + self.start_new_lookups(); + Poll::Pending } } @@ -701,8 +745,9 @@ fn interval_at(start: Instant, duration: Duration) -> Interval { #[derive(Clone)] pub(crate) struct Metrics { publish: Counter, - amount_last_published: Gauge, - request: Counter, + amount_addresses_last_published: Gauge, + requests: Counter, + requests_pending: Gauge, dht_event_received: CounterVec, handle_value_found_event_failure: Counter, known_authorities_count: Gauge, @@ -719,7 +764,7 @@ impl Metrics { )?, registry, )?, - amount_last_published: register( + amount_addresses_last_published: register( Gauge::new( "authority_discovery_amount_external_addresses_last_published", "Number of external addresses published when authority discovery last \ @@ -727,7 +772,7 @@ impl Metrics { )?, registry, )?, - request: register( + requests: register( Counter::new( "authority_discovery_authority_addresses_requested_total", "Number of times authority discovery has requested external addresses of a \ @@ -735,6 +780,13 @@ impl Metrics { )?, registry, )?, + requests_pending: register( + Gauge::new( + "authority_discovery_authority_address_requests_pending", + "Number of pending authority address requests." + )?, + registry, + )?, dht_event_received: register( CounterVec::new( Opts::new( diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index 68aadca7a7f30ebe62d71095b766d303a17566ef..f7b7dc41fee40a857bd7c3f5cc6db9fce0e589f9 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -168,6 +168,7 @@ sp_api::mock_impl_runtime_apis! { pub struct TestNetwork { peer_id: PeerId, + external_addresses: Vec, // Whenever functions on `TestNetwork` are called, the function arguments are added to the // vectors below. 
pub put_value_call: Arc)>>>, @@ -179,6 +180,10 @@ impl Default for TestNetwork { fn default() -> Self { TestNetwork { peer_id: PeerId::random(), + external_addresses: vec![ + "/ip6/2001:db8::/tcp/30333" + .parse().unwrap(), + ], put_value_call: Default::default(), get_value_call: Default::default(), set_priority_group_call: Default::default(), @@ -212,10 +217,45 @@ impl NetworkStateInfo for TestNetwork { } fn external_addresses(&self) -> Vec { - vec!["/ip6/2001:db8::/tcp/30333".parse().unwrap()] + self.external_addresses.clone() } } +fn build_dht_event( + addresses: Vec, + public_key: AuthorityId, + key_store: &BareCryptoStorePtr, +) -> (libp2p::kad::record::Key, Vec) { + let mut serialized_addresses = vec![]; + schema::AuthorityAddresses { + addresses: addresses.into_iter().map(|a| a.to_vec()).collect() + }.encode(&mut serialized_addresses) + .map_err(Error::EncodingProto) + .unwrap(); + + let signature = key_store.read() + .sign_with( + key_types::AUTHORITY_DISCOVERY, + &public_key.clone().into(), + serialized_addresses.as_slice(), + ) + .map_err(|_| Error::Signing) + .unwrap(); + + let mut signed_addresses = vec![]; + schema::SignedAuthorityAddresses { + addresses: serialized_addresses.clone(), + signature, + } + .encode(&mut signed_addresses) + .map_err(Error::EncodingProto) + .unwrap(); + + let key = hash_authority_id(&public_key.to_raw_vec()); + let value = signed_addresses; + (key, value) +} + #[test] fn new_registers_metrics() { let (_dht_event_tx, dht_event_rx) = channel(1000); @@ -242,8 +282,8 @@ fn new_registers_metrics() { } #[test] -fn request_addresses_of_others_triggers_dht_get_query() { - let _ = ::env_logger::try_init(); +fn triggers_dht_get_query() { + sp_tracing::try_init_simple(); let (_dht_event_tx, dht_event_rx) = channel(1000); // Generate authority keys @@ -257,7 +297,6 @@ fn request_addresses_of_others_triggers_dht_get_query() { let network: Arc = Arc::new(Default::default()); let key_store = KeyStore::new(); - let (_to_worker, from_service) = mpsc::channel(0); let mut worker = Worker::new( from_service, @@ -269,7 +308,12 @@ fn request_addresses_of_others_triggers_dht_get_query() { None, ); - worker.request_addresses_of_others().unwrap(); + worker.refill_pending_lookups_queue().unwrap(); + + futures::executor::block_on(futures::future::poll_fn(|cx| { + assert_eq!(Poll::Pending, worker.poll_unpin(cx)); + Poll::Ready(()) + })); // Expect authority discovery to request new records from the dht. assert_eq!(network.get_value_call.lock().unwrap().len(), 2); @@ -277,7 +321,7 @@ fn request_addresses_of_others_triggers_dht_get_query() { #[test] fn publish_discover_cycle() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); // Node A publishing its address. @@ -347,6 +391,9 @@ fn publish_discover_cycle() { dht_event_tx.try_send(dht_event).unwrap(); let f = |cx: &mut Context<'_>| -> Poll<()> { + worker.refill_pending_lookups_queue().unwrap(); + worker.start_new_lookups(); + // Make authority discovery handle the event. 
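// The added `refill_pending_lookups_queue()` and `start_new_lookups()` calls above are what
// keep this test valid against the reworked worker: `handle_dht_value_found_event` only
// accepts records whose key is still tracked in `in_flight_lookups`, failing with
// `Error::ReceivingUnexpectedRecord` otherwise, so the lookup for the published key must be
// in flight before the DHT event is processed.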
if let Poll::Ready(e) = worker.handle_dht_events(cx) { panic!("Unexpected error: {:?}", e); @@ -542,40 +589,11 @@ fn never_add_own_address_to_priority_group() { )) }; - let dht_event = { - let addresses = vec![ - sentry_multiaddr.to_vec(), - random_multiaddr.to_vec(), - ]; - - let mut serialized_addresses = vec![]; - schema::AuthorityAddresses { addresses } - .encode(&mut serialized_addresses) - .map_err(Error::EncodingProto) - .unwrap(); - - let signature = validator_key_store.read() - .sign_with( - key_types::AUTHORITY_DISCOVERY, - &validator_public.clone().into(), - serialized_addresses.as_slice(), - ) - .map_err(|_| Error::Signing) - .unwrap(); - - let mut signed_addresses = vec![]; - schema::SignedAuthorityAddresses { - addresses: serialized_addresses.clone(), - signature, - } - .encode(&mut signed_addresses) - .map_err(Error::EncodingProto) - .unwrap(); - - let key = hash_authority_id(&validator_public.to_raw_vec()); - let value = signed_addresses; - (key, value) - }; + let dht_event = build_dht_event( + vec![sentry_multiaddr, random_multiaddr.clone()], + validator_public.into(), + &validator_key_store, + ); let (_dht_event_tx, dht_event_rx) = channel(1); let sentry_test_api = Arc::new(TestApi { @@ -594,6 +612,9 @@ fn never_add_own_address_to_priority_group() { None, ); + sentry_worker.refill_pending_lookups_queue().unwrap(); + sentry_worker.start_new_lookups(); + sentry_worker.handle_dht_value_found_event(vec![dht_event]).unwrap(); sentry_worker.set_priority_group().unwrap(); @@ -612,6 +633,51 @@ fn never_add_own_address_to_priority_group() { ); } +#[test] +fn limit_number_of_addresses_added_to_cache_per_authority() { + let remote_key_store = KeyStore::new(); + let remote_public = remote_key_store + .write() + .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) + .unwrap(); + + let addresses = (0..100).map(|_| { + let peer_id = PeerId::random(); + let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); + address.with(multiaddr::Protocol::P2p( + peer_id.into(), + )) + }).collect(); + + let dht_event = build_dht_event( + addresses, + remote_public.into(), + &remote_key_store, + ); + + let (_dht_event_tx, dht_event_rx) = channel(1); + + let (_to_worker, from_service) = mpsc::channel(0); + let mut worker = Worker::new( + from_service, + Arc::new(TestApi { authorities: vec![remote_public.into()] }), + Arc::new(TestNetwork::default()), + vec![], + dht_event_rx.boxed(), + Role::Sentry, + None, + ); + + worker.refill_pending_lookups_queue().unwrap(); + worker.start_new_lookups(); + + worker.handle_dht_value_found_event(vec![dht_event]).unwrap(); + assert_eq!( + MAX_ADDRESSES_PER_AUTHORITY, + worker.addr_cache.get_addresses_by_authority_id(&remote_public.into()).unwrap().len(), + ); +} + #[test] fn do_not_cache_addresses_without_peer_id() { let remote_key_store = KeyStore::new(); @@ -629,40 +695,14 @@ fn do_not_cache_addresses_without_peer_id() { let multiaddr_without_peer_id: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); - let dht_event = { - let addresses = vec![ - multiaddr_with_peer_id.to_vec(), - multiaddr_without_peer_id.to_vec(), - ]; - - let mut serialized_addresses = vec![]; - schema::AuthorityAddresses { addresses } - .encode(&mut serialized_addresses) - .map_err(Error::EncodingProto) - .unwrap(); - - let signature = remote_key_store.read() - .sign_with( - key_types::AUTHORITY_DISCOVERY, - &remote_public.clone().into(), - serialized_addresses.as_slice(), - ) - .map_err(|_| Error::Signing) - .unwrap(); - - let mut 
signed_addresses = vec![]; - schema::SignedAuthorityAddresses { - addresses: serialized_addresses.clone(), - signature, - } - .encode(&mut signed_addresses) - .map_err(Error::EncodingProto) - .unwrap(); - - let key = hash_authority_id(&remote_public.to_raw_vec()); - let value = signed_addresses; - (key, value) - }; + let dht_event = build_dht_event( + vec![ + multiaddr_with_peer_id.clone(), + multiaddr_without_peer_id, + ], + remote_public.into(), + &remote_key_store, + ); let (_dht_event_tx, dht_event_rx) = channel(1); let local_test_api = Arc::new(TestApi { @@ -683,6 +723,9 @@ fn do_not_cache_addresses_without_peer_id() { None, ); + local_worker.refill_pending_lookups_queue().unwrap(); + local_worker.start_new_lookups(); + local_worker.handle_dht_value_found_event(vec![dht_event]).unwrap(); assert_eq!( @@ -691,3 +734,147 @@ fn do_not_cache_addresses_without_peer_id() { "Expect worker to only cache `Multiaddr`s with `PeerId`s.", ); } + +#[test] +fn addresses_to_publish_adds_p2p() { + let (_dht_event_tx, dht_event_rx) = channel(1000); + let network: Arc = Arc::new(Default::default()); + + assert!(!matches!( + network.external_addresses().pop().unwrap().pop().unwrap(), + multiaddr::Protocol::P2p(_) + )); + + let (_to_worker, from_service) = mpsc::channel(0); + let worker = Worker::new( + from_service, + Arc::new(TestApi { + authorities: vec![], + }), + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(KeyStore::new()), + Some(prometheus_endpoint::Registry::new()), + ); + + assert!( + matches!( + worker.addresses_to_publish().next().unwrap().pop().unwrap(), + multiaddr::Protocol::P2p(_) + ), + "Expect `addresses_to_publish` to append `p2p` protocol component.", + ); +} + +/// Ensure [`Worker::addresses_to_publish`] does not add an additional `p2p` protocol component in +/// case one already exists. +#[test] +fn addresses_to_publish_respects_existing_p2p_protocol() { + let (_dht_event_tx, dht_event_rx) = channel(1000); + let network: Arc = Arc::new(TestNetwork { + external_addresses: vec![ + "/ip6/2001:db8::/tcp/30333/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC" + .parse().unwrap(), + ], + .. 
Default::default() + }); + + let (_to_worker, from_service) = mpsc::channel(0); + let worker = Worker::new( + from_service, + Arc::new(TestApi { + authorities: vec![], + }), + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(KeyStore::new()), + Some(prometheus_endpoint::Registry::new()), + ); + + assert_eq!( + network.external_addresses, worker.addresses_to_publish().collect::>(), + "Expected Multiaddr from `TestNetwork` to not be altered.", + ); +} + +#[test] +fn lookup_throttling() { + let remote_multiaddr = { + let peer_id = PeerId::random(); + let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); + + address.with(multiaddr::Protocol::P2p( + peer_id.into(), + )) + }; + let remote_key_store = KeyStore::new(); + let remote_public_keys: Vec = (0..20).map(|_| { + remote_key_store + .write() + .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) + .unwrap().into() + }).collect(); + let remote_hash_to_key = remote_public_keys.iter() + .map(|k| (hash_authority_id(k.as_ref()), k.clone())) + .collect::>(); + + + let (mut dht_event_tx, dht_event_rx) = channel(1); + let (_to_worker, from_service) = mpsc::channel(0); + let network = Arc::new(TestNetwork::default()); + let mut worker = Worker::new( + from_service, + Arc::new(TestApi { authorities: remote_public_keys.clone() }), + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Sentry, + None, + ); + + futures::executor::block_on(futures::future::poll_fn(|cx| { + worker.refill_pending_lookups_queue().unwrap(); + + // Assert worker to trigger MAX_IN_FLIGHT_LOOKUPS lookups. + assert_eq!(Poll::Pending, worker.poll_unpin(cx)); + assert_eq!(worker.pending_lookups.len(), remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS); + assert_eq!(worker.in_flight_lookups.len(), MAX_IN_FLIGHT_LOOKUPS); + assert_eq!(network.get_value_call.lock().unwrap().len(), MAX_IN_FLIGHT_LOOKUPS); + + // Make first lookup succeed. + let remote_hash = network.get_value_call.lock().unwrap().pop().unwrap(); + let remote_key: AuthorityId = remote_hash_to_key.get(&remote_hash).unwrap().clone(); + let dht_event = { + let (key, value) = build_dht_event(vec![remote_multiaddr.clone()], remote_key, &remote_key_store); + sc_network::DhtEvent::ValueFound(vec![(key, value)]) + }; + dht_event_tx.try_send(dht_event).expect("Channel has capacity of 1."); + + // Assert worker to trigger another lookup. + assert_eq!(Poll::Pending, worker.poll_unpin(cx)); + assert_eq!(worker.pending_lookups.len(), remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS - 1); + assert_eq!(worker.in_flight_lookups.len(), MAX_IN_FLIGHT_LOOKUPS); + assert_eq!(network.get_value_call.lock().unwrap().len(), MAX_IN_FLIGHT_LOOKUPS); + + // Make second one fail. + let remote_hash = network.get_value_call.lock().unwrap().pop().unwrap(); + let dht_event = sc_network::DhtEvent::ValueNotFound(remote_hash); + dht_event_tx.try_send(dht_event).expect("Channel has capacity of 1."); + + // Assert worker to trigger another lookup. + assert_eq!(Poll::Pending, worker.poll_unpin(cx)); + assert_eq!(worker.pending_lookups.len(), remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS - 2); + assert_eq!(worker.in_flight_lookups.len(), MAX_IN_FLIGHT_LOOKUPS); + assert_eq!(network.get_value_call.lock().unwrap().len(), MAX_IN_FLIGHT_LOOKUPS); + + worker.refill_pending_lookups_queue().unwrap(); + + // Assert worker to restock pending lookups and forget about in-flight lookups. 
+ assert_eq!(worker.pending_lookups.len(), remote_public_keys.len()); + assert_eq!(worker.in_flight_lookups.len(), 0); + + Poll::Ready(()) + })); +} diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index 6160a41fdef43c889991eabec914da9c423461a8..1b1d8921bcfb35736912d31dc6bd673eb0e08bd3 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "sc-basic-authorship" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Basic implementation of block-authoring logic." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,21 +17,21 @@ codec = { package = "parity-scale-codec", version = "1.3.4" } futures = "0.3.4" futures-timer = "3.0.1" log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc5"} -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0-rc5", path = "../../primitives/inherents" } -sc-telemetry = { version = "2.0.0-rc5", path = "../telemetry" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transaction-pool" } -sc-block-builder = { version = "0.8.0-rc5", path = "../block-builder" } -sc-proposer-metrics = { version = "0.8.0-rc5", path = "../proposer-metrics" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} +sp-api = { version = "2.0.0", path = "../../primitives/api" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sc-client-api = { version = "2.0.0", path = "../api" } +sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } +sc-telemetry = { version = "2.0.0", path = "../telemetry" } +sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } +sc-block-builder = { version = "0.8.0", path = "../block-builder" } +sc-proposer-metrics = { version = "0.8.0", path = "../proposer-metrics" } tokio-executor = { version = "0.2.0-alpha.6", features = ["blocking"] } [dev-dependencies] -sc-transaction-pool = { version = "2.0.0-rc5", path = "../../client/transaction-pool" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } +sc-transaction-pool = { version = "2.0.0", path = "../../client/transaction-pool" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } parking_lot = "0.10.0" diff --git a/client/basic-authorship/README.md b/client/basic-authorship/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1a20593c09eaa159a81ddd05c89abe77b7c1b57f --- /dev/null +++ 
b/client/basic-authorship/README.md @@ -0,0 +1,32 @@ +Basic implementation of block-authoring logic. + +# Example + +```rust +// The first step is to create a `ProposerFactory`. +let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone(), None); + +// From this factory, we create a `Proposer`. +let proposer = proposer_factory.init( + &client.header(&BlockId::number(0)).unwrap().unwrap(), +); + +// The proposer is created asynchronously. +let proposer = futures::executor::block_on(proposer).unwrap(); + +// This `Proposer` allows us to create a block proposition. +// The proposer will grab transactions from the transaction pool, and put them into the block. +let future = proposer.propose( + Default::default(), + Default::default(), + Duration::from_secs(2), + RecordProof::Yes, +); + +// We wait until the proposition is performed. +let block = futures::executor::block_on(future).unwrap(); +println!("Generated block: {:?}", block.block); +``` + + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/block-builder/Cargo.toml b/client/block-builder/Cargo.toml index ac1d0265fd9e26d163795c639a5366b8b81a71d8..0c3d289bbcbc152b42270342cba0ee4c5cdeef8b 100644 --- a/client/block-builder/Cargo.toml +++ b/client/block-builder/Cargo.toml @@ -1,29 +1,30 @@ [package] name = "sc-block-builder" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate block builder" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../primitives/block-builder" } -sp-inherents = { version = "2.0.0-rc5", path = "../../primitives/inherents" } -sc-client-api = { version = "2.0.0-rc5", path = "../api" } +sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-api = { version = "2.0.0", path = "../../primitives/api" } +sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } +sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-block-builder = { version = "2.0.0", path = "../../primitives/block-builder" } +sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } +sc-client-api = { version = "2.0.0", path = "../api" } codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } [dev-dependencies] substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -sp-trie = { version = "2.0.0-rc5", path = "../../primitives/trie" } +sp-trie = { version = "2.0.0", path = "../../primitives/trie" } diff --git a/client/block-builder/README.md b/client/block-builder/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..c691f6692abff918c5dfe2f849d3263119724f54 --- /dev/null +++ b/client/block-builder/README.md @@ -0,0 +1,9 @@ +Substrate block builder + +This crate provides the [`BlockBuilder`] utility and the corresponding runtime api +[`BlockBuilder`](sp_block_builder::BlockBuilder).Error + +The block builder utility is used in the node as an abstraction over the runtime api to +initialize a block, to push extrinsics and to finalize a block. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index 5a4759cbf17ece4361f359c3096551d2b4d4181c..a73d809ba25235781cba06e62d1ceeefd3594153 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -1,23 +1,25 @@ [package] name = "sc-chain-spec" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate chain configurations." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-chain-spec-derive = { version = "2.0.0-rc5", path = "./derive" } +sc-chain-spec-derive = { version = "2.0.0", path = "./derive" } impl-trait-for-tuples = "0.1.3" -sc-network = { version = "0.8.0-rc5", path = "../network" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sc-network = { version = "0.8.0", path = "../network" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-chain-spec = { version = "2.0.0-rc5", path = "../../primitives/chain-spec" } -sc-telemetry = { version = "2.0.0-rc5", path = "../telemetry" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-chain-spec = { version = "2.0.0", path = "../../primitives/chain-spec" } +sc-telemetry = { version = "2.0.0", path = "../telemetry" } +codec = { package = "parity-scale-codec", version = "1.3.4" } diff --git a/client/chain-spec/README.md b/client/chain-spec/README.md new file mode 100644 index 0000000000000000000000000000000000000000..59a66aa5ace79855998debdb7c7b0857f501b26b --- /dev/null +++ b/client/chain-spec/README.md @@ -0,0 +1,92 @@ +Substrate chain configurations. + +This crate contains structs and utilities to declare +a runtime-specific configuration file (a.k.a chain spec). + +Basic chain spec type containing all required parameters is +[`ChainSpec`](https://docs.rs/sc-chain-spec/latest/sc_chain_spec/struct.ChainSpec.html). It can be extended with +additional options that contain configuration specific to your chain. +Usually the extension is going to be an amalgamate of types exposed +by Substrate core modules. To allow the core modules to retrieve +their configuration from your extension you should use `ChainSpecExtension` +macro exposed by this crate. + +```rust +use std::collections::HashMap; +use sc_chain_spec::{GenericChainSpec, ChainSpecExtension}; + +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecExtension)] +pub struct MyExtension { + pub known_blocks: HashMap, +} + +pub type MyChainSpec = GenericChainSpec; +``` + +Some parameters may require different values depending on the +current blockchain height (a.k.a. forks). 
You can use `ChainSpecGroup` +macro and provided [`Forks`](https://docs.rs/sc-chain-spec/latest/sc_chain_spec/struct.Forks.html) structure to put +such parameters to your chain spec. +This will allow to override a single parameter starting at specific +block number. + +```rust +use sc_chain_spec::{Forks, ChainSpecGroup, ChainSpecExtension, GenericChainSpec}; + +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecGroup)] +pub struct ClientParams { + max_block_size: usize, + max_extrinsic_size: usize, +} + +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecGroup)] +pub struct PoolParams { + max_transaction_size: usize, +} + +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecGroup, ChainSpecExtension)] +pub struct Extension { + pub client: ClientParams, + pub pool: PoolParams, +} + +pub type BlockNumber = u64; + +/// A chain spec supporting forkable `ClientParams`. +pub type MyChainSpec1 = GenericChainSpec>; + +/// A chain spec supporting forkable `Extension`. +pub type MyChainSpec2 = GenericChainSpec>; +``` + +It's also possible to have a set of parameters that is allowed to change +with block numbers (i.e. is forkable), and another set that is not subject to changes. +This is also possible by declaring an extension that contains `Forks` within it. + + +```rust +use serde::{Serialize, Deserialize}; +use sc_chain_spec::{Forks, GenericChainSpec, ChainSpecGroup, ChainSpecExtension}; + +#[derive(Clone, Debug, Serialize, Deserialize, ChainSpecGroup)] +pub struct ClientParams { + max_block_size: usize, + max_extrinsic_size: usize, +} + +#[derive(Clone, Debug, Serialize, Deserialize, ChainSpecGroup)] +pub struct PoolParams { + max_transaction_size: usize, +} + +#[derive(Clone, Debug, Serialize, Deserialize, ChainSpecExtension)] +pub struct Extension { + pub client: ClientParams, + #[forks] + pub pool: Forks, +} + +pub type MyChainSpec = GenericChainSpec; +``` + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/chain-spec/derive/Cargo.toml b/client/chain-spec/derive/Cargo.toml index b7eb03d7fbb234be848ef8e408ce9e62990b34d6..6826168a206a5e9529b2decaac1d18ece966e7df 100644 --- a/client/chain-spec/derive/Cargo.toml +++ b/client/chain-spec/derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-chain-spec-derive" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 52414f8687c96ed97f56e84e1345d3c0f8423ecc..1fbf0419e20017cbca6287a0749603ee5b6e46ce 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -17,6 +17,7 @@ // along with this program. If not, see . //! Substrate chain configurations. +#![warn(missing_docs)] use std::{borrow::Cow, fs::File, path::PathBuf, sync::Arc, collections::HashMap}; use serde::{Serialize, Deserialize}; @@ -26,6 +27,7 @@ use serde_json as json; use crate::{RuntimeGenesis, ChainType, extension::GetExtension, Properties}; use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; +use sp_runtime::traits::Block as BlockT; enum GenesisSource { File(PathBuf), @@ -157,6 +159,7 @@ struct ClientSpec { consensus_engine: (), #[serde(skip_serializing)] genesis: serde::de::IgnoredAny, + light_sync_state: Option, } /// A type denoting empty extensions. 
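The `light_sync_state` field added to `ClientSpec` above stores the finalized header in SCALE-encoded form. As a minimal sketch of the encode/decode round trip that `LightSyncState::to_serializable` and `from_serializable` (introduced in the next hunk) rely on, assuming `parity-scale-codec` imported as `codec` with its `derive` feature, and using a stand-in payload type rather than the real runtime header:

```rust
// Sketch only: `DummyHeader` is a stand-in for the real block header type.
use codec::{Decode, Encode};

#[derive(Encode, Decode, PartialEq, Debug)]
struct DummyHeader {
    number: u64,
    parent_hash: [u8; 32],
}

fn main() {
    let header = DummyHeader { number: 42, parent_hash: [0u8; 32] };

    // Encode to the compact SCALE byte representation (what ends up in `StorageData`).
    let encoded: Vec<u8> = header.encode();

    // Decode back from a byte slice; the cursor advances as bytes are consumed.
    let decoded = DummyHeader::decode(&mut &encoded[..]).expect("valid encoding");
    assert_eq!(header, decoded);
}
```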
@@ -245,6 +248,7 @@ impl ChainSpec { extensions, consensus_engine: (), genesis: Default::default(), + light_sync_state: None, }; ChainSpec { @@ -257,6 +261,11 @@ impl ChainSpec { fn chain_type(&self) -> ChainType { self.client_spec.chain_type.clone() } + + /// Hardcode information to allow light clients to sync quickly into the chain spec. + fn set_light_sync_state(&mut self, light_sync_state: SerializableLightSyncState) { + self.client_spec.light_sync_state = Some(light_sync_state); + } } impl ChainSpec { @@ -284,16 +293,15 @@ impl ChainSpec { } } -impl ChainSpec { - /// Dump to json string. - pub fn as_json(&self, raw: bool) -> Result { - #[derive(Serialize, Deserialize)] - struct Container { - #[serde(flatten)] - client_spec: ClientSpec, - genesis: Genesis, +#[derive(Serialize, Deserialize)] +struct JsonContainer { + #[serde(flatten)] + client_spec: ClientSpec, + genesis: Genesis, +} - }; +impl ChainSpec { + fn json_container(&self, raw: bool) -> Result, String> { let genesis = match (raw, self.genesis.resolve()?) { (true, Genesis::Runtime(g)) => { let storage = g.build_storage()?; @@ -313,10 +321,15 @@ impl ChainSpec { }, (_, genesis) => genesis, }; - let container = Container { + Ok(JsonContainer { client_spec: self.client_spec.clone(), genesis, - }; + }) + } + + /// Dump to json string. + pub fn as_json(&self, raw: bool) -> Result { + let container = self.json_container(raw)?; json::to_string_pretty(&container) .map_err(|e| format!("Error generating spec json: {}", e)) } @@ -378,6 +391,42 @@ where fn set_storage(&mut self, storage: Storage) { self.genesis = GenesisSource::Storage(storage); } + + fn set_light_sync_state(&mut self, light_sync_state: SerializableLightSyncState) { + ChainSpec::set_light_sync_state(self, light_sync_state) + } +} + +/// Hardcoded information that allows light clients to sync quickly. +pub struct LightSyncState { + /// The header of the best finalized block. + pub header: ::Header, +} + +impl LightSyncState { + /// Convert into a `SerializableLightSyncState`. + pub fn to_serializable(&self) -> SerializableLightSyncState { + use codec::Encode; + + SerializableLightSyncState { + header: StorageData(self.header.encode()), + } + } + + /// Convert from a `SerializableLightSyncState`. + pub fn from_serializable(serialized: &SerializableLightSyncState) -> Result { + Ok(Self { + header: codec::Decode::decode(&mut &serialized.header.0[..])?, + }) + } +} + +/// The serializable form of `LightSyncState`. Created using `LightSyncState::to_serializable`. +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +#[serde(deny_unknown_fields)] +pub struct SerializableLightSyncState { + header: StorageData, } #[cfg(test)] diff --git a/client/chain-spec/src/lib.rs b/client/chain-spec/src/lib.rs index 8901a9a682224991a3679f6e160ac0a0c36f8237..f5afe496f1980717f6588fe566aea036a25e27be 100644 --- a/client/chain-spec/src/lib.rs +++ b/client/chain-spec/src/lib.rs @@ -108,7 +108,9 @@ mod chain_spec; mod extension; -pub use chain_spec::{ChainSpec as GenericChainSpec, NoExtension}; +pub use chain_spec::{ + ChainSpec as GenericChainSpec, NoExtension, LightSyncState, SerializableLightSyncState, +}; pub use extension::{Group, Fork, Forks, Extension, GetExtension, get_extension}; pub use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; pub use sp_chain_spec::{Properties, ChainType}; @@ -155,6 +157,8 @@ pub trait ChainSpec: BuildStorage + Send { /// /// This will be used as storage at genesis.
fn set_storage(&mut self, storage: Storage); + /// Hardcode infomation to allow light clients to sync quickly into the chain spec. + fn set_light_sync_state(&mut self, light_sync_state: SerializableLightSyncState); } impl std::fmt::Debug for dyn ChainSpec { diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 85a1eb0fe0a4c2727c96364aed2064f8cc0a256a..dca91d18c3efce72e7a8b30ddacc94c9b1a5512e 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -1,19 +1,19 @@ [package] name = "sc-cli" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "Substrate CLI interface." edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -env_logger = "0.7.0" log = "0.4.8" atty = "0.2.13" regex = "1.3.4" @@ -22,28 +22,37 @@ ansi_term = "0.12.1" lazy_static = "1.4.0" tokio = { version = "0.2.21", features = [ "signal", "rt-core", "rt-threaded", "blocking" ] } futures = "0.3.4" -fdlimit = "0.1.4" +fdlimit = "0.2.0" +libp2p = "0.28.1" +parity-scale-codec = "1.3.0" +hex = "0.4.2" +rand = "0.7.3" +bip39 = "0.6.0-beta.1" serde_json = "1.0.41" -sc-informant = { version = "0.8.0-rc5", path = "../informant" } -sp-panic-handler = { version = "2.0.0-rc5", path = "../../primitives/panic-handler" } -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sc-network = { version = "0.8.0-rc5", path = "../network" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils" } -sp-version = { version = "2.0.0-rc5", path = "../../primitives/version" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../service" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } -sc-telemetry = { version = "2.0.0-rc5", path = "../telemetry" } -substrate-prometheus-endpoint = { path = "../../utils/prometheus" , version = "0.8.0-rc5"} -sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } +sc-keystore = { version = "2.0.0", path = "../keystore" } +sc-informant = { version = "0.8.0", path = "../informant" } +sp-panic-handler = { version = "2.0.0", path = "../../primitives/panic-handler" } +sc-client-api = { version = "2.0.0", path = "../api" } +sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sc-network = { version = "0.8.0", path = "../network" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0", path = "../../primitives/utils" } +sp-version = { version = "2.0.0", path = "../../primitives/version" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sc-service = { version = "0.8.0", default-features = false, path = "../service" } +sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } +sc-telemetry = { version = "2.0.0", path = "../telemetry" } +substrate-prometheus-endpoint = { path = "../../utils/prometheus" , version = "0.8.0"} +sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } names = "0.11.0" structopt = "0.3.8" -sc-tracing = { version = "2.0.0-rc5", path = "../tracing" } +sc-tracing = { version 
= "2.0.0", path = "../tracing" } chrono = "0.4.10" parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } serde = "1.0.111" +tracing = "0.1.10" +tracing-log = "0.1.1" +tracing-subscriber = "0.2.10" [target.'cfg(not(target_os = "unknown"))'.dependencies] rpassword = "4.0.1" @@ -53,6 +62,8 @@ nix = "0.17.0" [dev-dependencies] tempfile = "3.1.0" +sp-io = { version = "2.0.0-rc3", path = "../../primitives/io" } +sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/application-crypto" } [features] wasmtime = [ diff --git a/client/cli/README.md b/client/cli/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2504dbb0c03b5f9525c580622445ac431fe61d6f --- /dev/null +++ b/client/cli/README.md @@ -0,0 +1,3 @@ +Substrate CLI library. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index db13fff76148e6acbfc74212e0c4a4cacc53e525..85400f2a27759fe3743173239fd44c67a39e9ab8 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -85,6 +85,23 @@ arg_enum! { } } +arg_enum! { + #[derive(Debug, Copy, Clone, PartialEq, Eq)] + pub enum CryptoScheme { + Ed25519, + Sr25519, + Ecdsa, + } +} + +arg_enum! { + #[derive(Debug, Copy, Clone, PartialEq, Eq)] + pub enum OutputType { + Json, + Text, + } +} + arg_enum! { /// How to execute blocks #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -155,8 +172,6 @@ arg_enum! { pub enum Database { // Facebooks RocksDB RocksDb, - // Subdb. https://github.com/paritytech/subdb/ - SubDb, // ParityDb. https://github.com/paritytech/parity-db/ ParityDb, } diff --git a/client/cli/src/commands/build_sync_spec_cmd.rs b/client/cli/src/commands/build_sync_spec_cmd.rs new file mode 100644 index 0000000000000000000000000000000000000000..3f1bfce6a3290dc7fb4d8867c272c7558ba08e2a --- /dev/null +++ b/client/cli/src/commands/build_sync_spec_cmd.rs @@ -0,0 +1,113 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::error; +use crate::params::{SharedParams, NetworkParams}; +use crate::CliConfiguration; +use log::info; +use sc_network::config::build_multiaddr; +use sc_service::{config::{MultiaddrWithPeerId, NetworkConfiguration}, ChainSpec}; +use structopt::StructOpt; +use std::io::Write; +use std::sync::Arc; +use sp_runtime::traits::Block as BlockT; +use sc_service::chain_ops::build_light_sync_state; +use sc_service::NetworkStatusSinks; +use futures::{FutureExt, StreamExt}; +use futures::future::ready; + +/// The `build-sync-spec` command used to build a chain spec that contains a light client state +/// so that light clients can sync faster. 
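The `build-sync-spec` command defined below can wait, when `--sync-first` is passed, until the network status stream reports an idle sync state with at least one peer before exporting the spec. A minimal, self-contained sketch of that stream-filter pattern, with stand-in `SyncState` and `Status` types rather than the real `sc-network` ones:

```rust
// Sketch of "wait for the first status matching a predicate", the same
// `futures` pattern used by `--sync-first`. All types here are stand-ins.
use futures::{future::ready, stream, FutureExt, StreamExt};

#[derive(Clone, Copy, PartialEq, Debug)]
enum SyncState { Downloading, Idle }

#[derive(Clone, Copy, Debug)]
struct Status { sync_state: SyncState, num_sync_peers: u32 }

async fn wait_until_synced(statuses: impl stream::Stream<Item = Status> + Unpin) {
    statuses
        // Keep only statuses that indicate the node has finished syncing.
        .filter(|s| ready(s.sync_state == SyncState::Idle && s.num_sync_peers > 0))
        // Resolve on the first matching status and discard it.
        .into_future()
        .map(drop)
        .await;
}

fn main() {
    let updates = stream::iter(vec![
        Status { sync_state: SyncState::Downloading, num_sync_peers: 0 },
        Status { sync_state: SyncState::Idle, num_sync_peers: 3 },
    ]);
    futures::executor::block_on(wait_until_synced(updates));
}
```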
+#[derive(Debug, StructOpt)] +pub struct BuildSyncSpecCmd { + /// Force raw genesis storage output. + #[structopt(long = "raw")] + pub raw: bool, + + /// Sync the chain using a full client first. + #[structopt(long)] + pub sync_first: bool, + + /// Disable adding the default bootnode to the specification. + /// + /// By default the `/ip4/127.0.0.1/tcp/30333/p2p/NODE_PEER_ID` bootnode is added to the + /// specification when no bootnode exists. + #[structopt(long = "disable-default-bootnode")] + pub disable_default_bootnode: bool, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub network_params: NetworkParams, +} + +impl BuildSyncSpecCmd { + /// Run the build-sync-spec command + pub async fn run( + &self, + mut spec: Box, + network_config: NetworkConfiguration, + client: Arc, + network_status_sinks: NetworkStatusSinks, + ) -> error::Result<()> + where + B: BlockT, + CL: sp_blockchain::HeaderBackend, + { + if self.sync_first { + network_status_sinks.status_stream(std::time::Duration::from_secs(1)).filter(|status| { + ready(status.sync_state == sc_network::SyncState::Idle && status.num_sync_peers > 0) + }).into_future().map(drop).await; + } + + let light_sync_state = build_light_sync_state(client)?; + spec.set_light_sync_state(light_sync_state.to_serializable()); + + info!("Building chain spec"); + let raw_output = self.raw; + + if spec.boot_nodes().is_empty() && !self.disable_default_bootnode { + let keys = network_config.node_key.into_keypair()?; + let peer_id = keys.public().into_peer_id(); + let addr = MultiaddrWithPeerId { + multiaddr: build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(30333u16)], + peer_id, + }; + spec.add_boot_node(addr) + } + + let json = sc_service::chain_ops::build_spec(&*spec, raw_output)?; + if std::io::stdout().write_all(json.as_bytes()).is_err() { + let _ = std::io::stderr().write_all(b"Error writing to stdout\n"); + } + Ok(()) + } +} + +impl CliConfiguration for BuildSyncSpecCmd { + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } + + fn network_params(&self) -> Option<&NetworkParams> { + Some(&self.network_params) + } +} diff --git a/client/cli/src/commands/export_blocks_cmd.rs b/client/cli/src/commands/export_blocks_cmd.rs index 118832a79d29d96ba255b0df8b92ea04b1091301..e175d498941b84f007c9f7fd461e5485a002b5f9 100644 --- a/client/cli/src/commands/export_blocks_cmd.rs +++ b/client/cli/src/commands/export_blocks_cmd.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use crate::error; -use crate::params::{BlockNumber, DatabaseParams, PruningParams, SharedParams}; +use crate::params::{GenericNumber, DatabaseParams, PruningParams, SharedParams}; use crate::CliConfiguration; use log::info; use sc_service::{ @@ -44,13 +44,13 @@ pub struct ExportBlocksCmd { /// /// Default is 1. #[structopt(long = "from", value_name = "BLOCK")] - pub from: Option, + pub from: Option, /// Specify last block number. /// /// Default is best block. #[structopt(long = "to", value_name = "BLOCK")] - pub to: Option, + pub to: Option, /// Use binary output rather than JSON. #[structopt(long)] diff --git a/client/cli/src/commands/generate.rs b/client/cli/src/commands/generate.rs new file mode 100644 index 0000000000000000000000000000000000000000..4664e17551c17ed48f690a08819586ef1a4a522c --- /dev/null +++ b/client/cli/src/commands/generate.rs @@ -0,0 +1,90 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of the `generate` subcommand +use bip39::{MnemonicType, Mnemonic, Language}; +use structopt::StructOpt; +use crate::{ + utils::print_from_uri, KeystoreParams, Error, + with_crypto_scheme, NetworkSchemeFlag, OutputTypeFlag, CryptoSchemeFlag, +}; + +/// The `generate` command +#[derive(Debug, StructOpt)] +#[structopt(name = "generate", about = "Generate a random account")] +pub struct GenerateCmd { + /// The number of words in the phrase to generate. One of 12 (default), 15, 18, 21 and 24. + #[structopt(long, short = "w", value_name = "WORDS")] + words: Option, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub keystore_params: KeystoreParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub network_scheme: NetworkSchemeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub output_scheme: OutputTypeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub crypto_scheme: CryptoSchemeFlag, +} + +impl GenerateCmd { + /// Run the command + pub fn run(&self) -> Result<(), Error> { + let words = match self.words { + Some(words) => { + MnemonicType::for_word_count(words) + .map_err(|_| { + Error::Input("Invalid number of words given for phrase: must be 12/15/18/21/24".into()) + })? + }, + None => MnemonicType::Words12, + }; + let mnemonic = Mnemonic::new(words, Language::English); + let password = self.keystore_params.read_password()?; + let output = self.output_scheme.output_type.clone(); + + with_crypto_scheme!( + self.crypto_scheme.scheme, + print_from_uri( + mnemonic.phrase(), + password, + self.network_scheme.network.clone(), + output + ) + ); + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::GenerateCmd; + use structopt::StructOpt; + + #[test] + fn generate() { + let generate = GenerateCmd::from_iter(&["generate", "--password", "12345"]); + assert!(generate.run().is_ok()) + } +} diff --git a/client/cli/src/commands/generate_node_key.rs b/client/cli/src/commands/generate_node_key.rs new file mode 100644 index 0000000000000000000000000000000000000000..ad292e4712d840efa5f015acb6341a0805ba9ebc --- /dev/null +++ b/client/cli/src/commands/generate_node_key.rs @@ -0,0 +1,76 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Implementation of the `generate-node-key` subcommand + +use crate::Error; +use structopt::StructOpt; +use std::{path::PathBuf, fs}; +use libp2p::identity::{ed25519 as libp2p_ed25519, PublicKey}; + +/// The `generate-node-key` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "generate-node-key", + about = "Generate a random node libp2p key, save it to \ + file or print it to stdout and print its peer ID to stderr" +)] +pub struct GenerateNodeKeyCmd { + /// Name of file to save secret key to. + /// + /// If not given, the secret key is printed to stdout. + #[structopt(long)] + file: Option, +} + +impl GenerateNodeKeyCmd { + /// Run the command + pub fn run(&self) -> Result<(), Error> { + let keypair = libp2p_ed25519::Keypair::generate(); + let secret = keypair.secret(); + let peer_id = PublicKey::Ed25519(keypair.public()).into_peer_id(); + let secret_hex = hex::encode(secret.as_ref()); + + match &self.file { + Some(file) => fs::write(file, secret_hex)?, + None => print!("{}", secret_hex), + } + + eprintln!("{}", peer_id); + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::Builder; + use std::io::Read; + + #[test] + fn generate_node_key() { + let mut file = Builder::new().prefix("keyfile").tempfile().unwrap(); + let file_path = file.path().display().to_string(); + let generate = + GenerateNodeKeyCmd::from_iter(&["generate-node-key", "--file", &file_path]); + assert!(generate.run().is_ok()); + let mut buf = String::new(); + assert!(file.read_to_string(&mut buf).is_ok()); + assert!(hex::decode(buf).is_ok()); + } +} diff --git a/client/cli/src/commands/insert.rs b/client/cli/src/commands/insert.rs new file mode 100644 index 0000000000000000000000000000000000000000..5e4a0d42ffefc13fb260c29336b716e144c2ab08 --- /dev/null +++ b/client/cli/src/commands/insert.rs @@ -0,0 +1,94 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of the `insert` subcommand + +use crate::{Error, KeystoreParams, CryptoSchemeFlag, SharedParams, utils, with_crypto_scheme}; +use structopt::StructOpt; +use sp_core::{crypto::KeyTypeId, traits::BareCryptoStore}; +use std::convert::TryFrom; +use sc_service::config::KeystoreConfig; +use sc_keystore::Store as KeyStore; +use sp_core::crypto::SecretString; + +/// The `insert` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "insert", + about = "Insert a key to the keystore of a node." +)] +pub struct InsertCmd { + /// The secret key URI. + /// If the value is a file, the file content is used as URI. + /// If not given, you will be prompted for the URI. 
+ #[structopt(long)] + suri: Option, + + /// Key type, examples: "gran", or "imon" + #[structopt(long)] + key_type: String, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub keystore_params: KeystoreParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub crypto_scheme: CryptoSchemeFlag, +} + +impl InsertCmd { + /// Run the command + pub fn run(&self) -> Result<(), Error> { + let suri = utils::read_uri(self.suri.as_ref())?; + let base_path = self.shared_params.base_path.as_ref() + .ok_or_else(|| Error::Other("please supply base path".into()))?; + + let (keystore, public) = match self.keystore_params.keystore_config(base_path)? { + KeystoreConfig::Path { path, password } => { + let public = with_crypto_scheme!( + self.crypto_scheme.scheme, + to_vec(&suri, password.clone()) + )?; + let keystore = KeyStore::open(path, password) + .map_err(|e| format!("{}", e))?; + (keystore, public) + }, + _ => unreachable!("keystore_config always returns path and password; qed") + }; + + let key_type = KeyTypeId::try_from(self.key_type.as_str()) + .map_err(|_| { + Error::Other("Cannot convert argument to keytype: argument should be 4-character string".into()) + })?; + + keystore.write() + .insert_unknown(key_type, &suri, &public[..]) + .map_err(|e| Error::Other(format!("{:?}", e)))?; + + Ok(()) + } +} + +fn to_vec(uri: &str, pass: Option) -> Result, Error> { + let p = utils::pair_from_suri::

(uri, pass)?; + Ok(p.public().as_ref().to_vec()) +} diff --git a/client/cli/src/commands/inspect.rs b/client/cli/src/commands/inspect.rs new file mode 100644 index 0000000000000000000000000000000000000000..0c9e54d118533320492d0623fd0fbb5731c97c96 --- /dev/null +++ b/client/cli/src/commands/inspect.rs @@ -0,0 +1,97 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of the `inspect` subcommand + +use crate::{ + utils, KeystoreParams, with_crypto_scheme, NetworkSchemeFlag, + OutputTypeFlag, CryptoSchemeFlag, Error, +}; +use structopt::StructOpt; +/// The `inspect` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "inspect-key", + about = "Gets a public key and a SS58 address from the provided Secret URI" +)] +pub struct InspectKeyCmd { + /// A Key URI to be inspected. May be a secret seed, secret URI + /// (with derivation paths and password), SS58 or public URI. + /// + /// If the given value is a file, the file content will be used + /// as URI. + /// + /// If omitted, you will be prompted for the URI. + uri: Option, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub keystore_params: KeystoreParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub network_scheme: NetworkSchemeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub output_scheme: OutputTypeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub crypto_scheme: CryptoSchemeFlag, +} + +impl InspectKeyCmd { + /// Run the command + pub fn run(&self) -> Result<(), Error> { + let uri = utils::read_uri(self.uri.as_ref())?; + let password = self.keystore_params.read_password()?; + + use utils::print_from_uri; + with_crypto_scheme!( + self.crypto_scheme.scheme, + print_from_uri( + &uri, + password, + self.network_scheme.network.clone(), + self.output_scheme.output_type.clone() + ) + ); + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use structopt::StructOpt; + + #[test] + fn inspect() { + let words = + "remember fiber forum demise paper uniform squirrel feel access exclude casual effort"; + let seed = "0xad1fb77243b536b90cfe5f0d351ab1b1ac40e3890b41dc64f766ee56340cfca5"; + + let inspect = + InspectKeyCmd::from_iter(&["inspect-key", words, "--password", "12345"]); + assert!(inspect.run().is_ok()); + + let inspect = InspectKeyCmd::from_iter(&["inspect-key", seed]); + assert!(inspect.run().is_ok()); + } +} diff --git a/client/cli/src/commands/inspect_node_key.rs b/client/cli/src/commands/inspect_node_key.rs new file mode 100644 index 0000000000000000000000000000000000000000..be0b88589d5e9dfd2cc84d0a543ba2ccb4cd836f --- /dev/null +++ b/client/cli/src/commands/inspect_node_key.rs @@ -0,0 +1,75 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of the `inspect-node-key` subcommand + +use crate::{Error, NetworkSchemeFlag}; +use std::fs; +use libp2p::identity::{PublicKey, ed25519}; +use std::path::PathBuf; +use structopt::StructOpt; + +/// The `inspect-node-key` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "inspect-node-key", + about = "Print the peer ID corresponding to the node key in the given file." +)] +pub struct InspectNodeKeyCmd { + /// Name of file to read the secret key from. + #[structopt(long)] + file: PathBuf, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub network_scheme: NetworkSchemeFlag, +} + +impl InspectNodeKeyCmd { + /// runs the command + pub fn run(&self) -> Result<(), Error> { + let mut file_content = hex::decode(fs::read(&self.file)?) + .map_err(|_| "failed to decode secret as hex")?; + let secret = ed25519::SecretKey::from_bytes(&mut file_content) + .map_err(|_| "Bad node key file")?; + + let keypair = ed25519::Keypair::from(secret); + let peer_id = PublicKey::Ed25519(keypair.public()).into_peer_id(); + + println!("{}", peer_id); + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use super::super::GenerateNodeKeyCmd; + + #[test] + fn inspect_node_key() { + let path = tempfile::tempdir().unwrap().into_path().join("node-id").into_os_string(); + let path = path.to_str().unwrap(); + let cmd = GenerateNodeKeyCmd::from_iter(&["generate-node-key", "--file", path.clone()]); + + assert!(cmd.run().is_ok()); + + let cmd = InspectNodeKeyCmd::from_iter(&["inspect-node-key", "--file", path]); + assert!(cmd.run().is_ok()); + } +} diff --git a/client/cli/src/commands/key.rs b/client/cli/src/commands/key.rs new file mode 100644 index 0000000000000000000000000000000000000000..930acd7925ac6cd2936b6126425233ca6044c1b1 --- /dev/null +++ b/client/cli/src/commands/key.rs @@ -0,0 +1,62 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Key related CLI utilities + +use crate::Error; +use structopt::StructOpt; + +use super::{ + insert::InsertCmd, + inspect::InspectKeyCmd, + generate::GenerateCmd, + inspect_node_key::InspectNodeKeyCmd, + generate_node_key::GenerateNodeKeyCmd, +}; + +/// Key utilities for the cli. 
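The `KeySubcommand` enum that follows groups the new key utilities behind a single entry point. As a hedged sketch, not part of this PR, of how a downstream node CLI might embed it, assuming the `KeySubcommand` and `Error` re-exports that `commands/mod.rs` gains later in this diff:

```rust
// Hypothetical top-level CLI wrapping the key utilities in a `key` subcommand.
// `sc_cli::KeySubcommand` and `sc_cli::Error` are assumed re-exports.
use structopt::StructOpt;

#[derive(Debug, StructOpt)]
enum Subcommand {
    /// Key management utilities.
    Key(sc_cli::KeySubcommand),
}

fn main() -> Result<(), sc_cli::Error> {
    match Subcommand::from_args() {
        // Each key subcommand is synchronous and dispatches through `run()`.
        Subcommand::Key(cmd) => cmd.run(),
    }
}
```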
+#[derive(Debug, StructOpt)] +pub enum KeySubcommand { + /// Generate a random node libp2p key, save it to file or print it to stdout + /// and print its peer ID to stderr. + GenerateNodeKey(GenerateNodeKeyCmd), + + /// Generate a random account + Generate(GenerateCmd), + + /// Gets a public key and a SS58 address from the provided Secret URI + InspectKey(InspectKeyCmd), + + /// Print the peer ID corresponding to the node key in the given file + InspectNodeKey(InspectNodeKeyCmd), + + /// Insert a key to the keystore of a node. + Insert(InsertCmd), +} + +impl KeySubcommand { + /// run the key subcommands + pub fn run(&self) -> Result<(), Error> { + match self { + KeySubcommand::GenerateNodeKey(cmd) => cmd.run(), + KeySubcommand::Generate(cmd) => cmd.run(), + KeySubcommand::InspectKey(cmd) => cmd.run(), + KeySubcommand::Insert(cmd) => cmd.run(), + KeySubcommand::InspectNodeKey(cmd) => cmd.run(), + } + } +} diff --git a/client/cli/src/commands/mod.rs b/client/cli/src/commands/mod.rs index 04cce66bef80d231a2b9d63509d1133914c7e5a1..899abf0c3d4374fba04d7ce962258604756e646c 100644 --- a/client/cli/src/commands/mod.rs +++ b/client/cli/src/commands/mod.rs @@ -16,402 +16,42 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . mod build_spec_cmd; +mod build_sync_spec_cmd; mod check_block_cmd; mod export_blocks_cmd; mod export_state_cmd; mod import_blocks_cmd; mod purge_chain_cmd; +mod sign; +mod verify; +mod vanity; mod revert_cmd; mod run_cmd; - -pub use self::build_spec_cmd::BuildSpecCmd; -pub use self::check_block_cmd::CheckBlockCmd; -pub use self::export_blocks_cmd::ExportBlocksCmd; -pub use self::export_state_cmd::ExportStateCmd; -pub use self::import_blocks_cmd::ImportBlocksCmd; -pub use self::purge_chain_cmd::PurgeChainCmd; -pub use self::revert_cmd::RevertCmd; -pub use self::run_cmd::RunCmd; -use std::fmt::Debug; -use structopt::StructOpt; - -/// All core commands that are provided by default. -/// -/// The core commands are split into multiple subcommands and `Run` is the default subcommand. From -/// the CLI user perspective, it is not visible that `Run` is a subcommand. So, all parameters of -/// `Run` are exported as main executable parameters. -#[derive(Debug, StructOpt)] -pub enum Subcommand { - /// Build a spec.json file, outputs to stdout. - BuildSpec(BuildSpecCmd), - - /// Export blocks to a file. - ExportBlocks(ExportBlocksCmd), - - /// Import blocks from file. - ImportBlocks(ImportBlocksCmd), - - /// Validate a single block. - CheckBlock(CheckBlockCmd), - - /// Revert chain to the previous state. - Revert(RevertCmd), - - /// Remove the whole chain data. - PurgeChain(PurgeChainCmd), - - /// Export state as raw chain spec. - ExportState(ExportStateCmd), -} - -// TODO: move to config.rs? 
-/// Macro that helps implement CliConfiguration on an enum of subcommand automatically -/// -/// # Example -/// -/// ``` -/// # #[macro_use] extern crate sc_cli; -/// -/// # struct EmptyVariant {} -/// -/// # impl sc_cli::CliConfiguration for EmptyVariant { -/// # fn shared_params(&self) -> &sc_cli::SharedParams { unimplemented!() } -/// # fn chain_id(&self, _: bool) -> sc_cli::Result { Ok("test-chain-id".to_string()) } -/// # } -/// -/// # fn main() { -/// enum Subcommand { -/// Variant1(EmptyVariant), -/// Variant2(EmptyVariant), -/// } -/// -/// substrate_cli_subcommands!( -/// Subcommand => Variant1, Variant2 -/// ); -/// -/// # use sc_cli::CliConfiguration; -/// # assert_eq!(Subcommand::Variant1(EmptyVariant {}).chain_id(false).unwrap(), "test-chain-id"); -/// -/// # } -/// ``` -/// -/// Which will expand to: -/// -/// ```ignore -/// impl CliConfiguration for Subcommand { -/// fn base_path(&self) -> Result> { -/// match self { -/// Subcommand::Variant1(cmd) => cmd.base_path(), -/// Subcommand::Variant2(cmd) => cmd.base_path(), -/// } -/// } -/// -/// fn is_dev(&self) -> Result { -/// match self { -/// Subcommand::Variant1(cmd) => cmd.is_dev(), -/// Subcommand::Variant2(cmd) => cmd.is_dev(), -/// } -/// } -/// -/// // ... -/// } -/// ``` -#[macro_export] -macro_rules! substrate_cli_subcommands { - ($enum:ident => $($variant:ident),*) => { - impl $crate::CliConfiguration for $enum { - fn shared_params(&self) -> &$crate::SharedParams { - match self { - $($enum::$variant(cmd) => cmd.shared_params()),* - } - } - - fn import_params(&self) -> Option<&$crate::ImportParams> { - match self { - $($enum::$variant(cmd) => cmd.import_params()),* - } - } - - fn pruning_params(&self) -> Option<&$crate::PruningParams> { - match self { - $($enum::$variant(cmd) => cmd.pruning_params()),* - } - } - - fn keystore_params(&self) -> Option<&$crate::KeystoreParams> { - match self { - $($enum::$variant(cmd) => cmd.keystore_params()),* - } - } - - fn network_params(&self) -> Option<&$crate::NetworkParams> { - match self { - $($enum::$variant(cmd) => cmd.network_params()),* - } - } - - fn offchain_worker_params(&self) -> Option<&$crate::OffchainWorkerParams> { - match self { - $($enum::$variant(cmd) => cmd.offchain_worker_params()),* - } - } - - fn database_params(&self) -> Option<&$crate::DatabaseParams> { - match self { - $($enum::$variant(cmd) => cmd.database_params()),* - } - } - - fn base_path(&self) -> $crate::Result<::std::option::Option> { - match self { - $($enum::$variant(cmd) => cmd.base_path()),* - } - } - - fn is_dev(&self) -> $crate::Result { - match self { - $($enum::$variant(cmd) => cmd.is_dev()),* - } - } - - fn role(&self, is_dev: bool) -> $crate::Result<::sc_service::Role> { - match self { - $($enum::$variant(cmd) => cmd.role(is_dev)),* - } - } - - fn transaction_pool(&self) - -> $crate::Result<::sc_service::config::TransactionPoolOptions> { - match self { - $($enum::$variant(cmd) => cmd.transaction_pool()),* - } - } - - fn network_config( - &self, - chain_spec: &::std::boxed::Box, - is_dev: bool, - net_config_dir: ::std::path::PathBuf, - client_id: &str, - node_name: &str, - node_key: ::sc_service::config::NodeKeyConfig, - ) -> $crate::Result<::sc_service::config::NetworkConfiguration> { - match self { - $( - $enum::$variant(cmd) => cmd.network_config( - chain_spec, is_dev, net_config_dir, client_id, node_name, node_key - ) - ),* - } - } - - fn keystore_config(&self, base_path: &::std::path::PathBuf) - -> $crate::Result<::sc_service::config::KeystoreConfig> { - match self { - 
$($enum::$variant(cmd) => cmd.keystore_config(base_path)),* - } - } - - fn database_cache_size(&self) -> $crate::Result<::std::option::Option> { - match self { - $($enum::$variant(cmd) => cmd.database_cache_size()),* - } - } - - fn database_config( - &self, - base_path: &::std::path::PathBuf, - cache_size: usize, - database: $crate::Database, - ) -> $crate::Result<::sc_service::config::DatabaseConfig> { - match self { - $($enum::$variant(cmd) => cmd.database_config(base_path, cache_size, database)),* - } - } - - fn database(&self) -> $crate::Result<::std::option::Option<$crate::Database>> { - match self { - $($enum::$variant(cmd) => cmd.database()),* - } - } - - fn state_cache_size(&self) -> $crate::Result { - match self { - $($enum::$variant(cmd) => cmd.state_cache_size()),* - } - } - - fn state_cache_child_ratio(&self) -> $crate::Result<::std::option::Option> { - match self { - $($enum::$variant(cmd) => cmd.state_cache_child_ratio()),* - } - } - - fn pruning(&self, unsafe_pruning: bool, role: &::sc_service::Role) - -> $crate::Result<::sc_service::config::PruningMode> { - match self { - $($enum::$variant(cmd) => cmd.pruning(unsafe_pruning, role)),* - } - } - - fn chain_id(&self, is_dev: bool) -> $crate::Result { - match self { - $($enum::$variant(cmd) => cmd.chain_id(is_dev)),* - } - } - - fn init(&self) -> $crate::Result<()> { - match self { - $($enum::$variant(cmd) => cmd.init::()),* - } - } - - fn node_name(&self) -> $crate::Result { - match self { - $($enum::$variant(cmd) => cmd.node_name()),* - } - } - - fn wasm_method(&self) -> $crate::Result<::sc_service::config::WasmExecutionMethod> { - match self { - $($enum::$variant(cmd) => cmd.wasm_method()),* - } - } - - fn execution_strategies(&self, is_dev: bool, is_validator: bool) - -> $crate::Result<::sc_client_api::execution_extensions::ExecutionStrategies> { - match self { - $($enum::$variant(cmd) => cmd.execution_strategies(is_dev, is_validator)),* - } - } - - fn rpc_ipc(&self) -> $crate::Result<::std::option::Option<::std::string::String>> { - match self { - $($enum::$variant(cmd) => cmd.rpc_ipc()),* - } - } - - fn rpc_http(&self) -> $crate::Result<::std::option::Option<::std::net::SocketAddr>> { - match self { - $($enum::$variant(cmd) => cmd.rpc_http()),* - } - } - - fn rpc_ws(&self) -> $crate::Result<::std::option::Option<::std::net::SocketAddr>> { - match self { - $($enum::$variant(cmd) => cmd.rpc_ws()),* - } - } - - fn rpc_methods(&self) -> $crate::Result { - match self { - $($enum::$variant(cmd) => cmd.rpc_methods()),* - } - } - - fn rpc_ws_max_connections(&self) -> $crate::Result<::std::option::Option> { - match self { - $($enum::$variant(cmd) => cmd.rpc_ws_max_connections()),* - } - } - - fn rpc_cors(&self, is_dev: bool) - -> $crate::Result<::std::option::Option<::std::vec::Vec>> { - match self { - $($enum::$variant(cmd) => cmd.rpc_cors(is_dev)),* - } - } - - fn prometheus_config(&self) - -> $crate::Result<::std::option::Option<::sc_service::config::PrometheusConfig>> { - match self { - $($enum::$variant(cmd) => cmd.prometheus_config()),* - } - } - - fn telemetry_endpoints( - &self, - chain_spec: &Box, - ) -> $crate::Result<::std::option::Option<::sc_service::config::TelemetryEndpoints>> { - match self { - $($enum::$variant(cmd) => cmd.telemetry_endpoints(chain_spec)),* - } - } - - fn telemetry_external_transport(&self) - -> $crate::Result<::std::option::Option<::sc_service::config::ExtTransport>> { - match self { - $($enum::$variant(cmd) => cmd.telemetry_external_transport()),* - } - } - - fn default_heap_pages(&self) -> 
$crate::Result<::std::option::Option> { - match self { - $($enum::$variant(cmd) => cmd.default_heap_pages()),* - } - } - - fn offchain_worker( - &self, - role: &::sc_service::Role, - ) -> $crate::Result<::sc_service::config::OffchainWorkerConfig> { - match self { - $($enum::$variant(cmd) => cmd.offchain_worker(role)),* - } - } - - fn force_authoring(&self) -> $crate::Result { - match self { - $($enum::$variant(cmd) => cmd.force_authoring()),* - } - } - - fn disable_grandpa(&self) -> $crate::Result { - match self { - $($enum::$variant(cmd) => cmd.disable_grandpa()),* - } - } - - fn dev_key_seed(&self, is_dev: bool) -> $crate::Result<::std::option::Option> { - match self { - $($enum::$variant(cmd) => cmd.dev_key_seed(is_dev)),* - } - } - - fn tracing_targets(&self) -> $crate::Result<::std::option::Option> { - match self { - $($enum::$variant(cmd) => cmd.tracing_targets()),* - } - } - - fn tracing_receiver(&self) -> $crate::Result<::sc_service::TracingReceiver> { - match self { - $($enum::$variant(cmd) => cmd.tracing_receiver()),* - } - } - - fn node_key(&self, net_config_dir: &::std::path::PathBuf) - -> $crate::Result<::sc_service::config::NodeKeyConfig> { - match self { - $($enum::$variant(cmd) => cmd.node_key(net_config_dir)),* - } - } - - fn max_runtime_instances(&self) -> $crate::Result<::std::option::Option> { - match self { - $($enum::$variant(cmd) => cmd.max_runtime_instances()),* - } - } - - fn log_filters(&self) -> $crate::Result { - match self { - $($enum::$variant(cmd) => cmd.log_filters()),* - } - } - } - } -} - -substrate_cli_subcommands!( - Subcommand => BuildSpec, ExportBlocks, ImportBlocks, CheckBlock, Revert, PurgeChain, ExportState -); +mod generate_node_key; +mod generate; +mod insert; +mod inspect_node_key; +mod inspect; +mod key; +pub mod utils; + +pub use self::{ + build_spec_cmd::BuildSpecCmd, + build_sync_spec_cmd::BuildSyncSpecCmd, + check_block_cmd::CheckBlockCmd, + export_blocks_cmd::ExportBlocksCmd, + export_state_cmd::ExportStateCmd, + import_blocks_cmd::ImportBlocksCmd, + purge_chain_cmd::PurgeChainCmd, + sign::SignCmd, + generate::GenerateCmd, + insert::InsertCmd, + inspect::InspectKeyCmd, + generate_node_key::GenerateNodeKeyCmd, + inspect_node_key::InspectNodeKeyCmd, + key::KeySubcommand, + vanity::VanityCmd, + verify::VerifyCmd, + revert_cmd::RevertCmd, + run_cmd::RunCmd, +}; diff --git a/client/cli/src/commands/revert_cmd.rs b/client/cli/src/commands/revert_cmd.rs index bbfb0d2ff99a73b20571a724181744d78c9edeb3..b2e3c1bf8e2b674e4313253aa6abd2431701d587 100644 --- a/client/cli/src/commands/revert_cmd.rs +++ b/client/cli/src/commands/revert_cmd.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use crate::error; -use crate::params::{BlockNumber, PruningParams, SharedParams}; +use crate::params::{GenericNumber, PruningParams, SharedParams}; use crate::CliConfiguration; use sc_service::chain_ops::revert_chain; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; @@ -32,7 +32,7 @@ use sc_client_api::{Backend, UsageProvider}; pub struct RevertCmd { /// Number of blocks to revert. 
#[structopt(default_value = "256")] - pub num: BlockNumber, + pub num: GenericNumber, #[allow(missing_docs)] #[structopt(flatten)] diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index de5589196f27fa0ecb6d2fa9b749f7e367f0cf66..019b760e5b4aefc394b01fb29e29ece2ee1956e8 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -382,7 +382,7 @@ impl CliConfiguration for RunCmd { Ok(self.shared_params.dev || self.force_authoring) } - fn prometheus_config(&self) -> Result> { + fn prometheus_config(&self, default_listen_port: u16) -> Result> { Ok(if self.no_prometheus { None } else { @@ -393,7 +393,10 @@ impl CliConfiguration for RunCmd { }; Some(PrometheusConfig::new_with_default_registry( - SocketAddr::new(interface.into(), self.prometheus_port.unwrap_or(9615)) + SocketAddr::new( + interface.into(), + self.prometheus_port.unwrap_or(default_listen_port), + ) )) }) } @@ -427,7 +430,7 @@ impl CliConfiguration for RunCmd { .into()) } - fn rpc_http(&self) -> Result> { + fn rpc_http(&self, default_listen_port: u16) -> Result> { let interface = rpc_interface( self.rpc_external, self.unsafe_rpc_external, @@ -435,22 +438,22 @@ impl CliConfiguration for RunCmd { self.validator )?; - Ok(Some(SocketAddr::new(interface, self.rpc_port.unwrap_or(9933)))) + Ok(Some(SocketAddr::new(interface, self.rpc_port.unwrap_or(default_listen_port)))) } fn rpc_ipc(&self) -> Result> { Ok(self.ipc_path.clone()) } - fn rpc_ws(&self) -> Result> { + fn rpc_ws(&self, default_listen_port: u16) -> Result> { let interface = rpc_interface( self.ws_external, self.unsafe_ws_external, self.rpc_methods, - self.validator + self.validator, )?; - Ok(Some(SocketAddr::new(interface, self.ws_port.unwrap_or(9944)))) + Ok(Some(SocketAddr::new(interface, self.ws_port.unwrap_or(default_listen_port)))) } fn rpc_methods(&self) -> Result { diff --git a/client/cli/src/commands/sign.rs b/client/cli/src/commands/sign.rs new file mode 100644 index 0000000000000000000000000000000000000000..605fd5b12313f0c75fee8eef4951fafe9ee9f680 --- /dev/null +++ b/client/cli/src/commands/sign.rs @@ -0,0 +1,98 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Implementation of the `sign` subcommand +use crate::{error, utils, with_crypto_scheme, CryptoSchemeFlag, KeystoreParams}; +use structopt::StructOpt; +use sp_core::crypto::SecretString; + +/// The `sign` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "sign", + about = "Sign a message, with a given (secret) key" +)] +pub struct SignCmd { + /// The secret key URI. + /// If the value is a file, the file content is used as URI. + /// If not given, you will be prompted for the URI. 
+ #[structopt(long)] + suri: Option, + + /// Message to sign, if not provided you will be prompted to + /// pass the message via STDIN + #[structopt(long)] + message: Option, + + /// The message on STDIN is hex-encoded data + #[structopt(long)] + hex: bool, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub keystore_params: KeystoreParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub crypto_scheme: CryptoSchemeFlag, +} + + +impl SignCmd { + /// Run the command + pub fn run(&self) -> error::Result<()> { + let message = utils::read_message(self.message.as_ref(), self.hex)?; + let suri = utils::read_uri(self.suri.as_ref())?; + let password = self.keystore_params.read_password()?; + + let signature = with_crypto_scheme!( + self.crypto_scheme.scheme, + sign(&suri, password, message) + )?; + + println!("{}", signature); + Ok(()) + } +} + +fn sign(suri: &str, password: Option, message: Vec) -> error::Result { + let pair = utils::pair_from_suri::
<P>
(suri, password)?; + Ok(format!("{}", hex::encode(pair.sign(&message)))) +} + +#[cfg(test)] +mod test { + use super::SignCmd; + use structopt::StructOpt; + + #[test] + fn sign() { + let seed = "0xad1fb77243b536b90cfe5f0d351ab1b1ac40e3890b41dc64f766ee56340cfca5"; + + let sign = SignCmd::from_iter(&[ + "sign", + "--suri", + seed, + "--message", + &seed[2..], + "--password", + "12345" + ]); + assert!(sign.run().is_ok()); + } +} diff --git a/client/cli/src/commands/utils.rs b/client/cli/src/commands/utils.rs new file mode 100644 index 0000000000000000000000000000000000000000..a3298b222ad2b9538fed3ef1959ec8f250455bce --- /dev/null +++ b/client/cli/src/commands/utils.rs @@ -0,0 +1,234 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! subcommand utilities +use std::{io::Read, path::PathBuf}; +use sp_core::{ + Pair, hexdisplay::HexDisplay, + crypto::{Ss58Codec, Ss58AddressFormat}, +}; +use sp_runtime::{MultiSigner, traits::IdentifyAccount}; +use crate::{OutputType, error::{self, Error}}; +use serde_json::json; +use sp_core::crypto::{SecretString, Zeroize, ExposeSecret}; + +/// Public key type for Runtime +pub type PublicFor

<P> = <P as sp_core::Pair>::Public;
+/// Seed type for Runtime
+pub type SeedFor<P> = <P as sp_core::Pair>
::Seed; + +/// helper method to fetch uri from `Option` either as a file or read from stdin +pub fn read_uri(uri: Option<&String>) -> error::Result { + let uri = if let Some(uri) = uri { + let file = PathBuf::from(&uri); + if file.is_file() { + std::fs::read_to_string(uri)? + .trim_end() + .to_owned() + } else { + uri.into() + } + } else { + rpassword::read_password_from_tty(Some("URI: "))? + }; + + Ok(uri) +} + +/// print formatted pair from uri +pub fn print_from_uri( + uri: &str, + password: Option, + network_override: Option, + output: OutputType, +) where Pair: sp_core::Pair, Pair::Public: Into { + let password = password.as_ref().map(|s| s.expose_secret().as_str()); + if let Ok((pair, seed)) = Pair::from_phrase(uri, password.clone()) { + let public_key = pair.public(); + let network_override = network_override.unwrap_or_default(); + + match output { + OutputType::Json => { + let json = json!({ + "secretPhrase": uri, + "secretSeed": format_seed::(seed), + "publicKey": format_public_key::(public_key.clone()), + "accountId": format_account_id::(public_key), + "ss58Address": pair.public().into().into_account().to_ss58check_with_version(network_override), + }); + println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); + }, + OutputType::Text => { + println!( + "Secret phrase `{}` is account:\n \ + Secret seed: {}\n \ + Public key (hex): {}\n \ + Account ID: {}\n \ + SS58 Address: {}", + uri, + format_seed::(seed), + format_public_key::(public_key.clone()), + format_account_id::(public_key), + pair.public().into().into_account().to_ss58check_with_version(network_override), + ); + }, + } + } else if let Ok((pair, seed)) = Pair::from_string_with_seed(uri, password.clone()) { + let public_key = pair.public(); + let network_override = network_override.unwrap_or_default(); + + match output { + OutputType::Json => { + let json = json!({ + "secretKeyUri": uri, + "secretSeed": if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, + "publicKey": format_public_key::(public_key.clone()), + "accountId": format_account_id::(public_key), + "ss58Address": pair.public().into().into_account().to_ss58check_with_version(network_override), + }); + println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); + }, + OutputType::Text => { + println!( + "Secret Key URI `{}` is account:\n \ + Secret seed: {}\n \ + Public key (hex): {}\n \ + Account ID: {}\n \ + SS58 Address: {}", + uri, + if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, + format_public_key::(public_key.clone()), + format_account_id::(public_key), + pair.public().into().into_account().to_ss58check_with_version(network_override), + ); + }, + } + } else if let Ok((public_key, network)) = Pair::Public::from_string_with_version(uri) { + let network_override = network_override.unwrap_or(network); + + match output { + OutputType::Json => { + let json = json!({ + "publicKeyUri": uri, + "networkId": String::from(network_override), + "publicKey": format_public_key::(public_key.clone()), + "accountId": format_account_id::(public_key.clone()), + "ss58Address": public_key.to_ss58check_with_version(network_override), + }); + println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); + }, + OutputType::Text => { + println!( + "Public Key URI `{}` is account:\n \ + Network ID/version: {}\n \ + Public key (hex): {}\n \ + Account ID: {}\n \ + SS58 Address: {}", + uri, + String::from(network_override), + 
format_public_key::(public_key.clone()), + format_account_id::(public_key.clone()), + public_key.to_ss58check_with_version(network_override), + ); + }, + } + } else { + println!("Invalid phrase/URI given"); + } +} + +/// generate a pair from suri +pub fn pair_from_suri(suri: &str, password: Option) -> Result { + let result = if let Some(pass) = password { + let mut pass_str = pass.expose_secret().clone(); + let pair = P::from_string(suri, Some(&pass_str)); + pass_str.zeroize(); + pair + } else { + P::from_string(suri, None) + }; + + Ok(result.map_err(|err| format!("Invalid phrase {:?}", err))?) +} + +/// formats seed as hex +pub fn format_seed(seed: SeedFor

<P>) -> String {
+	format!("0x{}", HexDisplay::from(&seed.as_ref()))
+}
+
+/// formats public key as hex
+fn format_public_key<P: sp_core::Pair>(public_key: PublicFor<P>) -> String {
+	format!("0x{}", HexDisplay::from(&public_key.as_ref()))
+}
+
+/// formats public key as accountId as hex
+fn format_account_id<P: sp_core::Pair>(public_key: PublicFor<P>) -> String
+	where
+		PublicFor<P>
: Into, +{ + format!("0x{}", HexDisplay::from(&public_key.into().into_account().as_ref())) +} + +/// helper method for decoding hex +pub fn decode_hex>(message: T) -> Result, Error> { + let mut message = message.as_ref(); + if message[..2] == [b'0', b'x'] { + message = &message[2..] + } + hex::decode(message) + .map_err(|e| Error::Other(format!("Invalid hex ({})", e))) +} + +/// checks if message is Some, otherwise reads message from stdin and optionally decodes hex +pub fn read_message(msg: Option<&String>, should_decode: bool) -> Result, Error> { + let mut message = vec![]; + match msg { + Some(m) => { + message = decode_hex(m)?; + }, + None => { + std::io::stdin().lock().read_to_end(&mut message)?; + if should_decode { + message = decode_hex(&message)?; + } + } + } + Ok(message) +} + + +/// Allows for calling $method with appropriate crypto impl. +#[macro_export] +macro_rules! with_crypto_scheme { + ($scheme:expr, $method:ident($($params:expr),*)) => { + with_crypto_scheme!($scheme, $method<>($($params),*)) + }; + ($scheme:expr, $method:ident<$($generics:ty),*>($($params:expr),*)) => { + match $scheme { + $crate::CryptoScheme::Ecdsa => { + $method::($($params),*) + } + $crate::CryptoScheme::Sr25519 => { + $method::($($params),*) + } + $crate::CryptoScheme::Ed25519 => { + $method::($($params),*) + } + } + }; +} diff --git a/bin/utils/subkey/src/vanity.rs b/client/cli/src/commands/vanity.rs similarity index 56% rename from bin/utils/subkey/src/vanity.rs rename to client/cli/src/commands/vanity.rs index d09aeeef25a47a79ff434035a4d43a20d88c53a6..e6f923f73c45c74aa4563f64aaf10fedcf580ae5 100644 --- a/bin/utils/subkey/src/vanity.rs +++ b/client/cli/src/commands/vanity.rs @@ -5,7 +5,7 @@ // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or +// the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, @@ -16,9 +16,97 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use super::{PublicOf, PublicT, Crypto}; -use sp_core::Pair; +//! 
implementation of the `vanity` subcommand + +use crate::{ + error, utils, with_crypto_scheme, + CryptoSchemeFlag, NetworkSchemeFlag, OutputTypeFlag, +}; +use sp_core::crypto::Ss58Codec; +use structopt::StructOpt; use rand::{rngs::OsRng, RngCore}; +use sp_runtime::traits::IdentifyAccount; + +/// The `vanity` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "vanity", + about = "Generate a seed that provides a vanity address" +)] +pub struct VanityCmd { + /// Desired pattern + #[structopt(long, parse(try_from_str = assert_non_empty_string))] + pattern: String, + + #[allow(missing_docs)] + #[structopt(flatten)] + network_scheme: NetworkSchemeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + output_scheme: OutputTypeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + crypto_scheme: CryptoSchemeFlag, +} + +impl VanityCmd { + /// Run the command + pub fn run(&self) -> error::Result<()> { + let formated_seed = with_crypto_scheme!(self.crypto_scheme.scheme, generate_key(&self.pattern))?; + use utils::print_from_uri; + with_crypto_scheme!( + self.crypto_scheme.scheme, + print_from_uri( + &formated_seed, + None, + self.network_scheme.network.clone(), + self.output_scheme.output_type.clone() + ) + ); + Ok(()) + } +} + +/// genertae a key based on given pattern +fn generate_key(desired: &str) -> Result + where + Pair: sp_core::Pair, + Pair::Public: IdentifyAccount, + ::AccountId: Ss58Codec, +{ + println!("Generating key containing pattern '{}'", desired); + + let top = 45 + (desired.len() * 48); + let mut best = 0; + let mut seed = Pair::Seed::default(); + let mut done = 0; + + loop { + if done % 100000 == 0 { + OsRng.fill_bytes(seed.as_mut()); + } else { + next_seed(seed.as_mut()); + } + + let p = Pair::from_seed(&seed); + let ss58 = p.public().into_account().to_ss58check(); + let score = calculate_score(&desired, &ss58); + if score > best || desired.len() < 2 { + best = score; + if best >= top { + println!("best: {} == top: {}", best, top); + return Ok(utils::format_seed::(seed.clone())); + } + } + done += 1; + + if done % good_waypoint(done) == 0 { + println!("{} keys searched; best is {}/{} complete", done, best, top); + } + } +} fn good_waypoint(done: u64) -> u64 { match done { @@ -43,14 +131,6 @@ fn next_seed(seed: &mut [u8]) { } } -/// A structure used to carry both Pair and seed. -/// This should usually NOT been used. If unsure, use Pair. -pub(super) struct KeyPair { - pub pair: C::Pair, - pub seed: ::Seed, - pub score: usize, -} - /// Calculate the score of a key based on the desired /// input. fn calculate_score(_desired: &str, key: &str) -> usize { @@ -64,77 +144,40 @@ fn calculate_score(_desired: &str, key: &str) -> usize { 0 } -/// Validate whether the char is allowed to be used in base58. -/// num 0, lower l, upper I and O are not allowed. 
-fn validate_base58(c :char) -> bool { - c.is_alphanumeric() && !"0lIO".contains(c) -} - -pub(super) fn generate_key(desired: &str) -> Result, &'static str> where - PublicOf: PublicT, -{ - if desired.is_empty() { - return Err("Pattern must not be empty"); - } - - if !desired.chars().all(validate_base58) { - return Err("Pattern can only contains valid characters in base58 \ - (all alphanumeric except for 0, l, I and O)"); - } - - eprintln!("Generating key containing pattern '{}'", desired); - - let top = 45 + (desired.len() * 48); - let mut best = 0; - let mut seed = ::Seed::default(); - let mut done = 0; - - loop { - if done % 100000 == 0 { - OsRng.fill_bytes(seed.as_mut()); - } else { - next_seed(seed.as_mut()); - } - - let p = C::Pair::from_seed(&seed); - let ss58 = C::ss58_from_pair(&p); - let score = calculate_score(&desired, &ss58); - if score > best || desired.len() < 2 { - best = score; - let keypair = KeyPair { - pair: p, - seed: seed.clone(), - score: score, - }; - if best >= top { - eprintln!("best: {} == top: {}", best, top); - return Ok(keypair); - } - } - done += 1; - - if done % good_waypoint(done) == 0 { - eprintln!("{} keys searched; best is {}/{} complete", done, best, top); - } +/// checks that `pattern` is non-empty +fn assert_non_empty_string(pattern: &str) -> Result { + if pattern.is_empty() { + Err("Pattern must not be empty") + } else { + Ok(pattern.to_string()) } } + #[cfg(test)] mod tests { - use super::super::Ed25519; use super::*; use sp_core::{crypto::Ss58Codec, Pair}; + use sp_core::sr25519; #[cfg(feature = "bench")] use test::Bencher; + use structopt::StructOpt; + + #[test] + fn vanity() { + let vanity = VanityCmd::from_iter(&["vanity", "--pattern", "j"]); + assert!(vanity.run().is_ok()); + } #[test] fn test_generation_with_single_char() { - assert!(generate_key::("j") - .unwrap() - .pair - .public() - .to_ss58check() - .contains("j")); + let seed = generate_key::("j").unwrap(); + assert!( + sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) + .unwrap() + .public() + .to_ss58check() + .contains("j")); } #[test] @@ -175,22 +218,6 @@ mod tests { ); } - #[test] - fn test_invalid_pattern() { - assert!(generate_key::("").is_err()); - assert!(generate_key::("0").is_err()); - assert!(generate_key::("l").is_err()); - assert!(generate_key::("I").is_err()); - assert!(generate_key::("O").is_err()); - assert!(generate_key::("!").is_err()); - } - - #[test] - fn test_valid_pattern() { - assert!(generate_key::("o").is_ok()); - assert!(generate_key::("L").is_ok()); - } - #[cfg(feature = "bench")] #[bench] fn bench_paranoiac(b: &mut Bencher) { diff --git a/client/cli/src/commands/verify.rs b/client/cli/src/commands/verify.rs new file mode 100644 index 0000000000000000000000000000000000000000..ad16c11d5e4415dce429aab64663f10e0cfb71ce --- /dev/null +++ b/client/cli/src/commands/verify.rs @@ -0,0 +1,104 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! implementation of the `verify` subcommand + +use crate::{error, utils, with_crypto_scheme, CryptoSchemeFlag}; +use sp_core::{Public, crypto::Ss58Codec}; +use structopt::StructOpt; + +/// The `verify` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "verify", + about = "Verify a signature for a message, provided on STDIN, with a given (public or secret) key" +)] +pub struct VerifyCmd { + /// Signature, hex-encoded. + sig: String, + + /// The public or secret key URI. + /// If the value is a file, the file content is used as URI. + /// If not given, you will be prompted for the URI. + uri: Option, + + /// Message to verify, if not provided you will be prompted to + /// pass the message via STDIN + #[structopt(long)] + message: Option, + + /// The message on STDIN is hex-encoded data + #[structopt(long)] + hex: bool, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub crypto_scheme: CryptoSchemeFlag, +} + +impl VerifyCmd { + /// Run the command + pub fn run(&self) -> error::Result<()> { + let message = utils::read_message(self.message.as_ref(), self.hex)?; + let sig_data = utils::decode_hex(&self.sig)?; + let uri = utils::read_uri(self.uri.as_ref())?; + let uri = if uri.starts_with("0x") { + &uri[2..] + } else { + &uri + }; + + with_crypto_scheme!( + self.crypto_scheme.scheme, + verify(sig_data, message, uri) + ) + } +} + +fn verify(sig_data: Vec, message: Vec, uri: &str) -> error::Result<()> + where + Pair: sp_core::Pair, + Pair::Signature: Default + AsMut<[u8]>, +{ + let mut signature = Pair::Signature::default(); + if sig_data.len() != signature.as_ref().len() { + return Err(error::Error::Other(format!( + "signature has an invalid length. read {} bytes, expected {} bytes", + sig_data.len(), + signature.as_ref().len(), + ))); + } + signature.as_mut().copy_from_slice(&sig_data); + + let pubkey = if let Ok(pubkey_vec) = hex::decode(uri) { + Pair::Public::from_slice(pubkey_vec.as_slice()) + } else { + Pair::Public::from_string(uri) + .map_err(|_| { + error::Error::Other(format!("Invalid URI; expecting either a secret URI or a public URI.")) + })? + }; + + if Pair::verify(&signature, &message, &pubkey) { + println!("Signature verifies correctly."); + } else { + return Err(error::Error::Other("Signature invalid.".into())) + } + + Ok(()) +} diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index efda45a0eca9acfcabc1d7e50021eebcfb21fad5..43b755100244f28e9786a4bc4e2f48b06b650afe 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -24,6 +24,7 @@ use crate::{ init_logger, DatabaseParams, ImportParams, KeystoreParams, NetworkParams, NodeKeyParams, OffchainWorkerParams, PruningParams, SharedParams, SubstrateCli, }; +use log::warn; use names::{Generator, Name}; use sc_client_api::execution_extensions::ExecutionStrategies; use sc_service::config::{ @@ -38,11 +39,50 @@ use std::path::PathBuf; /// The maximum number of characters for a node name. pub(crate) const NODE_NAME_MAX_LENGTH: usize = 64; -/// default sub directory to store network config +/// Default sub directory to store network config. pub(crate) const DEFAULT_NETWORK_CONFIG_PATH: &'static str = "network"; +/// The recommended open file descriptor limit to be configured for the process. 
+const RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT: u64 = 10_000; + +/// Default configuration values used by Substrate +/// +/// These values will be used by [`CliConfiguritation`] to set +/// default values for e.g. the listen port or the RPC port. +pub trait DefaultConfigurationValues { + /// The port Substrate should listen on for p2p connections. + /// + /// By default this is `30333`. + fn p2p_listen_port() -> u16 { + 30333 + } + + /// The port Substrate should listen on for websocket connections. + /// + /// By default this is `9944`. + fn rpc_ws_listen_port() -> u16 { + 9944 + } + + /// The port Substrate should listen on for http connections. + /// + /// By default this is `9933`. + fn rpc_http_listen_port() -> u16 { + 9933 + } + + /// The port Substrate should listen on for prometheus connections. + /// + /// By default this is `9615`. + fn prometheus_listen_port() -> u16 { + 9615 + } +} + +impl DefaultConfigurationValues for () {} + /// A trait that allows converting an object to a Configuration -pub trait CliConfiguration: Sized { +pub trait CliConfiguration: Sized { /// Get the SharedParams for this object fn shared_params(&self) -> &SharedParams; @@ -122,6 +162,7 @@ pub trait CliConfiguration: Sized { client_id: &str, node_name: &str, node_key: NodeKeyConfig, + default_listen_port: u16, ) -> Result { Ok(if let Some(network_params) = self.network_params() { network_params.network_config( @@ -131,6 +172,7 @@ pub trait CliConfiguration: Sized { client_id, node_name, node_key, + default_listen_port, ) } else { NetworkConfiguration::new( @@ -180,9 +222,6 @@ pub trait CliConfiguration: Sized { path: base_path.join("db"), cache_size, }, - Database::SubDb => DatabaseConfig::SubDb { - path: base_path.join("subdb"), - }, Database::ParityDb => DatabaseConfig::ParityDb { path: base_path.join("paritydb"), }, @@ -257,22 +296,22 @@ pub trait CliConfiguration: Sized { /// Get the RPC HTTP address (`None` if disabled). /// /// By default this is `None`. - fn rpc_http(&self) -> Result> { - Ok(Default::default()) + fn rpc_http(&self, _default_listen_port: u16) -> Result> { + Ok(None) } /// Get the RPC IPC path (`None` if disabled). /// /// By default this is `None`. fn rpc_ipc(&self) -> Result> { - Ok(Default::default()) + Ok(None) } /// Get the RPC websocket address (`None` if disabled). /// /// By default this is `None`. - fn rpc_ws(&self) -> Result> { - Ok(Default::default()) + fn rpc_ws(&self, _default_listen_port: u16) -> Result> { + Ok(None) } /// Returns the RPC method set to expose. @@ -287,12 +326,12 @@ pub trait CliConfiguration: Sized { /// /// By default this is `None`. fn rpc_ws_max_connections(&self) -> Result> { - Ok(Default::default()) + Ok(None) } /// Get the RPC cors (`None` if disabled) /// - /// By default this is `None`. + /// By default this is `Some(Vec::new())`. fn rpc_cors(&self, _is_dev: bool) -> Result>> { Ok(Some(Vec::new())) } @@ -300,8 +339,8 @@ pub trait CliConfiguration: Sized { /// Get the prometheus configuration (`None` if disabled) /// /// By default this is `None`. - fn prometheus_config(&self) -> Result> { - Ok(Default::default()) + fn prometheus_config(&self, _default_listen_port: u16) -> Result> { + Ok(None) } /// Get the telemetry endpoints (if any) @@ -318,14 +357,14 @@ pub trait CliConfiguration: Sized { /// /// By default this is `None`. fn telemetry_external_transport(&self) -> Result> { - Ok(Default::default()) + Ok(None) } /// Get the default value for heap pages /// /// By default this is `None`. 
fn default_heap_pages(&self) -> Result> { - Ok(Default::default()) + Ok(None) } /// Returns an offchain worker config wrapped in `Ok(_)` @@ -445,6 +484,7 @@ pub trait CliConfiguration: Sized { client_id.as_str(), self.node_name()?.as_str(), node_key, + DCV::p2p_listen_port(), )?, keystore: self.keystore_config(&config_dir)?, database: self.database_config(&config_dir, database_cache_size, database)?, @@ -453,13 +493,13 @@ pub trait CliConfiguration: Sized { pruning: self.pruning(unsafe_pruning, &role)?, wasm_method: self.wasm_method()?, execution_strategies: self.execution_strategies(is_dev, is_validator)?, - rpc_http: self.rpc_http()?, - rpc_ws: self.rpc_ws()?, + rpc_http: self.rpc_http(DCV::rpc_http_listen_port())?, + rpc_ws: self.rpc_ws(DCV::rpc_ws_listen_port())?, rpc_ipc: self.rpc_ipc()?, rpc_methods: self.rpc_methods()?, rpc_ws_max_connections: self.rpc_ws_max_connections()?, rpc_cors: self.rpc_cors(is_dev)?, - prometheus_config: self.prometheus_config()?, + prometheus_config: self.prometheus_config(DCV::prometheus_listen_port())?, telemetry_endpoints: self.telemetry_endpoints(&chain_spec)?, telemetry_external_transport: self.telemetry_external_transport()?, default_heap_pages: self.default_heap_pages()?, @@ -488,20 +528,33 @@ pub trait CliConfiguration: Sized { Ok(self.shared_params().log_filters().join(",")) } - /// Initialize substrate. This must be done only once. + /// Initialize substrate. This must be done only once per process. /// /// This method: /// - /// 1. Set the panic handler - /// 2. Raise the FD limit - /// 3. Initialize the logger + /// 1. Sets the panic handler + /// 2. Initializes the logger + /// 3. Raises the FD limit fn init(&self) -> Result<()> { let logger_pattern = self.log_filters()?; + let tracing_receiver = self.tracing_receiver()?; + let tracing_targets = self.tracing_targets()?; sp_panic_handler::set(&C::support_url(), &C::impl_version()); - fdlimit::raise_fd_limit(); - init_logger(&logger_pattern); + if let Err(e) = init_logger(&logger_pattern, tracing_receiver, tracing_targets) { + log::warn!("💬 Problem initializing global logging framework: {:}", e) + } + + if let Some(new_limit) = fdlimit::raise_fd_limit() { + if new_limit < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT { + warn!( + "Low open file descriptor limit configured for the process. \ + Current value: {:?}, recommended value: {:?}.", + new_limit, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, + ); + } + } Ok(()) } diff --git a/client/cli/src/error.rs b/client/cli/src/error.rs index f091354be154bbd62856143ed9c23e8c2444c82c..7404d31fcf7bbdef22098b61a8eacde696e69e15 100644 --- a/client/cli/src/error.rs +++ b/client/cli/src/error.rs @@ -18,6 +18,8 @@ //! Initialization errors. + + /// Result type alias for the CLI. 
pub type Result = std::result::Result; @@ -32,6 +34,8 @@ pub enum Error { Service(sc_service::Error), /// Client error Client(sp_blockchain::Error), + /// scale codec error + Codec(parity_scale_codec::Error), /// Input error #[from(ignore)] Input(String), @@ -65,6 +69,7 @@ impl std::error::Error for Error { Error::Cli(ref err) => Some(err), Error::Service(ref err) => Some(err), Error::Client(ref err) => Some(err), + Error::Codec(ref err) => Some(err), Error::Input(_) => None, Error::InvalidListenMultiaddress => None, Error::Other(_) => None, diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index f940ab0b95d71b194079597c826340459838de96..f16d02cab51d51a77ce8ff535539ad9526ff5b7e 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -21,7 +21,7 @@ #![warn(missing_docs)] #![warn(unused_extern_crates)] -mod arg_enums; +pub mod arg_enums; mod commands; mod config; mod error; @@ -32,10 +32,7 @@ pub use arg_enums::*; pub use commands::*; pub use config::*; pub use error::*; -use lazy_static::lazy_static; -use log::info; pub use params::*; -use regex::Regex; pub use runner::*; use sc_service::{Configuration, TaskExecutor}; pub use sc_service::{ChainSpec, Role}; @@ -46,6 +43,7 @@ use structopt::{ clap::{self, AppSettings}, StructOpt, }; +use tracing_subscriber::layer::SubscriberExt; /// Substrate client CLI /// @@ -209,7 +207,7 @@ pub trait SubstrateCli: Sized { } /// Only create a Configuration for the command provided in argument - fn create_configuration( + fn create_configuration, DVC: DefaultConfigurationValues>( &self, command: &T, task_executor: TaskExecutor, @@ -228,79 +226,76 @@ pub trait SubstrateCli: Sized { fn native_runtime_version(chain_spec: &Box) -> &'static RuntimeVersion; } -/// Initialize the logger -pub fn init_logger(pattern: &str) { - use ansi_term::Colour; - - let mut builder = env_logger::Builder::new(); - // Disable info logging by default for some modules: - builder.filter(Some("ws"), log::LevelFilter::Off); - builder.filter(Some("yamux"), log::LevelFilter::Off); - builder.filter(Some("cranelift_codegen"), log::LevelFilter::Off); - builder.filter(Some("hyper"), log::LevelFilter::Warn); - builder.filter(Some("cranelift_wasm"), log::LevelFilter::Warn); - // Always log the special target `sc_tracing`, overrides global level - builder.filter(Some("sc_tracing"), log::LevelFilter::Trace); - // Enable info for others. - builder.filter(None, log::LevelFilter::Info); +/// Initialize the global logger +/// +/// This sets various global logging and tracing instances and thus may only be called once. +pub fn init_logger( + pattern: &str, + tracing_receiver: sc_tracing::TracingReceiver, + tracing_targets: Option, +) -> std::result::Result<(), String> { + if let Err(e) = tracing_log::LogTracer::init() { + return Err(format!( + "Registering Substrate logger failed: {:}!", e + )) + } + + let mut env_filter = tracing_subscriber::EnvFilter::default() + // Disable info logging by default for some modules. + .add_directive("ws=off".parse().expect("provided directive is valid")) + .add_directive("yamux=off".parse().expect("provided directive is valid")) + .add_directive("cranelift_codegen=off".parse().expect("provided directive is valid")) + // Set warn logging by default for some modules. + .add_directive("cranelife_wasm=warn".parse().expect("provided directive is valid")) + .add_directive("hyper=warn".parse().expect("provided directive is valid")) + // Always log the special target `sc_tracing`, overrides global level. 
+ .add_directive("sc_tracing=trace".parse().expect("provided directive is valid")) + // Enable info for others. + .add_directive(tracing_subscriber::filter::LevelFilter::INFO.into()); if let Ok(lvl) = std::env::var("RUST_LOG") { - builder.parse_filters(&lvl); + if lvl != "" { + // We're not sure if log or tracing is available at this moment, so silently ignore the + // parse error. + if let Ok(directive) = lvl.parse() { + env_filter = env_filter.add_directive(directive); + } + } + } + + if pattern != "" { + // We're not sure if log or tracing is available at this moment, so silently ignore the + // parse error. + if let Ok(directive) = pattern.parse() { + env_filter = env_filter.add_directive(directive); + } } - builder.parse_filters(pattern); let isatty = atty::is(atty::Stream::Stderr); let enable_color = isatty; - builder.format(move |buf, record| { - let now = time::now(); - let timestamp = - time::strftime("%Y-%m-%d %H:%M:%S", &now).expect("Error formatting log timestamp"); - - let mut output = if log::max_level() <= log::LevelFilter::Info { - format!( - "{} {}", - Colour::Black.bold().paint(timestamp), - record.args(), - ) - } else { - let name = ::std::thread::current() - .name() - .map_or_else(Default::default, |x| { - format!("{}", Colour::Blue.bold().paint(x)) - }); - let millis = (now.tm_nsec as f32 / 1000000.0).floor() as usize; - let timestamp = format!("{}.{:03}", timestamp, millis); - format!( - "{} {} {} {} {}", - Colour::Black.bold().paint(timestamp), - name, - record.level(), - record.target(), - record.args() - ) - }; - - if !isatty && record.level() <= log::Level::Info && atty::is(atty::Stream::Stdout) { - // duplicate INFO/WARN output to console - println!("{}", output); + let subscriber = tracing_subscriber::FmtSubscriber::builder() + .with_env_filter(env_filter) + .with_target(false) + .with_ansi(enable_color) + .with_writer(std::io::stderr) + .compact() + .finish(); + + if let Some(tracing_targets) = tracing_targets { + let profiling = sc_tracing::ProfilingLayer::new(tracing_receiver, &tracing_targets); + + if let Err(e) = tracing::subscriber::set_global_default(subscriber.with(profiling)) { + return Err(format!( + "Registering Substrate tracing subscriber failed: {:}!", e + )) } - - if !enable_color { - output = kill_color(output.as_ref()); + } else { + if let Err(e) = tracing::subscriber::set_global_default(subscriber) { + return Err(format!( + "Registering Substrate tracing subscriber failed: {:}!", e + )) } - - writeln!(buf, "{}", output) - }); - - if builder.try_init().is_err() { - info!("💬 Not registering Substrate logger, as there is already a global logger registered!"); - } -} - -fn kill_color(s: &str) -> String { - lazy_static! 
{ - static ref RE: Regex = Regex::new("\x1b\\[[^m]+m").expect("Error initializing color regex"); } - RE.replace_all(s, "").to_string() + Ok(()) } diff --git a/client/cli/src/params/keystore_params.rs b/client/cli/src/params/keystore_params.rs index a6eb438cc07806a020ec7b3eab40e53e70a4e764..3c04d63144595680394e00fecd4a699ed446a304 100644 --- a/client/cli/src/params/keystore_params.rs +++ b/client/cli/src/params/keystore_params.rs @@ -21,7 +21,9 @@ use sc_service::config::KeystoreConfig; use std::fs; use std::path::PathBuf; use structopt::StructOpt; -use sp_core::crypto::SecretString; +use crate::error; +use sp_core::crypto::{SecretString, Zeroize}; +use std::str::FromStr; /// default sub directory for the key store const DEFAULT_KEYSTORE_CONFIG_PATH: &'static str = "keystore"; @@ -73,7 +75,6 @@ impl KeystoreParams { let mut password = input_keystore_password()?; let secret = std::str::FromStr::from_str(password.as_str()) .map_err(|()| "Error reading password")?; - use sp_core::crypto::Zeroize; password.zeroize(); Some(secret) } @@ -84,7 +85,6 @@ impl KeystoreParams { .map_err(|e| format!("{}", e))?; let secret = std::str::FromStr::from_str(password.as_str()) .map_err(|()| "Error reading password")?; - use sp_core::crypto::Zeroize; password.zeroize(); Some(secret) } else { @@ -98,6 +98,22 @@ impl KeystoreParams { Ok(KeystoreConfig::Path { path, password }) } + + /// helper method to fetch password from `KeyParams` or read from stdin + pub fn read_password(&self) -> error::Result> { + let (password_interactive, password) = (self.password_interactive, self.password.clone()); + + let pass = if password_interactive { + let mut password = rpassword::read_password_from_tty(Some("Key password: "))?; + let pass = Some(FromStr::from_str(&password).map_err(|()| "Error reading password")?); + password.zeroize(); + pass + } else { + password + }; + + Ok(pass) + } } #[cfg(not(target_os = "unknown"))] diff --git a/client/cli/src/params/mod.rs b/client/cli/src/params/mod.rs index f648337ed0af6846193d1afaf8518d223d7868c9..93467bc8ec63778e95d116b57cf8b6dc92c12636 100644 --- a/client/cli/src/params/mod.rs +++ b/client/cli/src/params/mod.rs @@ -25,8 +25,11 @@ mod pruning_params; mod shared_params; mod transaction_pool_params; -use std::{fmt::Debug, str::FromStr}; +use std::{fmt::Debug, str::FromStr, convert::TryFrom}; use sp_runtime::{generic::BlockId, traits::{Block as BlockT, NumberFor}}; +use sp_core::crypto::Ss58AddressFormat; +use crate::arg_enums::{OutputType, CryptoScheme}; +use structopt::StructOpt; pub use crate::params::database_params::*; pub use crate::params::import_params::*; @@ -39,10 +42,10 @@ pub use crate::params::shared_params::*; pub use crate::params::transaction_pool_params::*; /// Wrapper type of `String` that holds an unsigned integer of arbitrary size, formatted as a decimal. -#[derive(Debug)] -pub struct BlockNumber(String); +#[derive(Debug, Clone)] +pub struct GenericNumber(String); -impl FromStr for BlockNumber { +impl FromStr for GenericNumber { type Err = String; fn from_str(block_number: &str) -> Result { @@ -57,15 +60,15 @@ impl FromStr for BlockNumber { } } -impl BlockNumber { +impl GenericNumber { /// Wrapper on top of `std::str::parse` but with `Error` as a `String` /// /// See `https://doc.rust-lang.org/std/primitive.str.html#method.parse` for more elaborate /// documentation. 
pub fn parse(&self) -> Result - where - N: FromStr, - N::Err: std::fmt::Debug, + where + N: FromStr, + N::Err: std::fmt::Debug, { FromStr::from_str(&self.0).map_err(|e| format!("Failed to parse block number: {:?}", e)) } @@ -89,7 +92,7 @@ impl FromStr for BlockNumberOrHash { Ok(Self(block_number.into())) } } else { - BlockNumber::from_str(block_number).map(|v| Self(v.0)) + GenericNumber::from_str(block_number).map(|v| Self(v.0)) } } } @@ -109,11 +112,55 @@ impl BlockNumberOrHash { .map_err(|e| format!("Failed to parse block hash: {:?}", e))? )) } else { - BlockNumber(self.0.clone()).parse().map(BlockId::Number) + GenericNumber(self.0.clone()).parse().map(BlockId::Number) } } } + +/// Optional flag for specifying crypto algorithm +#[derive(Debug, StructOpt)] +pub struct CryptoSchemeFlag { + /// cryptography scheme + #[structopt( + long, + value_name = "SCHEME", + possible_values = &CryptoScheme::variants(), + case_insensitive = true, + default_value = "Sr25519" + )] + pub scheme: CryptoScheme, +} + +/// Optional flag for specifying output type +#[derive(Debug, StructOpt)] +pub struct OutputTypeFlag { + /// output format + #[structopt( + long, + value_name = "FORMAT", + possible_values = &OutputType::variants(), + case_insensitive = true, + default_value = "Text" + )] + pub output_type: OutputType, +} + +/// Optional flag for specifying network scheme +#[derive(Debug, StructOpt)] +pub struct NetworkSchemeFlag { + /// network address format + #[structopt( + long, + value_name = "NETWORK", + short = "n", + possible_values = &Ss58AddressFormat::all_names()[..], + parse(try_from_str = Ss58AddressFormat::try_from), + case_insensitive = true, + )] + pub network: Option, +} + #[cfg(test)] mod tests { use super::*; diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 253585544d2600dd00bf9ba7fea3a6bca60b9a7d..faaf2c2bd210d6f430dca7e491ff50049af9817b 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -114,8 +114,9 @@ impl NetworkParams { client_id: &str, node_name: &str, node_key: NodeKeyConfig, + default_listen_port: u16, ) -> NetworkConfiguration { - let port = self.port.unwrap_or(30333); + let port = self.port.unwrap_or(default_listen_port); let listen_addresses = if self.listen_addr.is_empty() { vec![ @@ -147,6 +148,7 @@ impl NetworkParams { listen_addresses, public_addresses, notifications_protocols: Vec::new(), + request_response_protocols: Vec::new(), node_key, node_name: node_name.to_string(), client_version: client_id.to_string(), diff --git a/client/cli/src/params/node_key_params.rs b/client/cli/src/params/node_key_params.rs index 689cc6c681c8339e79937e3dbb8ac9e3cfe04c6f..875411fbfb62000d3e8c5bfc00cce52d3082fa04 100644 --- a/client/cli/src/params/node_key_params.rs +++ b/client/cli/src/params/node_key_params.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sc_network::config::NodeKeyConfig; +use sc_network::{config::identity::ed25519, config::NodeKeyConfig}; use sp_core::H256; use std::{path::PathBuf, str::FromStr}; use structopt::StructOpt; @@ -83,7 +83,7 @@ pub struct NodeKeyParams { /// as follows: /// /// `ed25519`: - /// The file must contain an unencoded 32 byte Ed25519 secret key. + /// The file must contain an unencoded 32 byte or hex encoded Ed25519 secret key. /// /// If the file does not exist, it is created with a newly generated secret key of /// the chosen type. 
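// Editorial sketch, not part of the patch: how a downstream CLI might reuse the new shared
// flags together with `with_crypto_scheme!`, mirroring the way `SignCmd` does above.
// `ShowPublicCmd` and `show_public` are hypothetical names, and the sketch assumes `sc-cli`,
// `sp-core`, `structopt` and `hex` are available as dependencies.
use sc_cli::{utils, with_crypto_scheme, CryptoSchemeFlag};
use structopt::StructOpt;

#[derive(Debug, StructOpt)]
pub struct ShowPublicCmd {
	/// The secret key URI; prompted for if omitted.
	#[structopt(long)]
	suri: Option<String>,

	#[allow(missing_docs)]
	#[structopt(flatten)]
	pub crypto_scheme: CryptoSchemeFlag,
}

impl ShowPublicCmd {
	/// Run the command, dispatching on `--scheme` (ecdsa, ed25519 or sr25519).
	pub fn run(&self) -> sc_cli::Result<()> {
		let suri = utils::read_uri(self.suri.as_ref())?;
		with_crypto_scheme!(self.crypto_scheme.scheme, show_public(&suri))
	}
}

fn show_public<P: sp_core::Pair>(suri: &str) -> sc_cli::Result<()> {
	let pair = utils::pair_from_suri::<P>(suri, None)?;
	println!("0x{}", hex::encode(pair.public().as_ref()));
	Ok(())
}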
@@ -100,12 +100,11 @@ impl NodeKeyParams { let secret = if let Some(node_key) = self.node_key.as_ref() { parse_ed25519_secret(node_key)? } else { - let path = self - .node_key_file - .clone() - .unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE)); - - sc_network::config::Secret::File(path) + sc_network::config::Secret::File( + self.node_key_file + .clone() + .unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE)) + ) }; NodeKeyConfig::Ed25519(secret) @@ -124,7 +123,7 @@ fn parse_ed25519_secret(hex: &str) -> error::Result error::Result error::Result<()> { - NodeKeyType::variants().iter().try_for_each(|t| { - let node_key_type = NodeKeyType::from_str(t).unwrap(); - let tmp = tempfile::Builder::new().prefix("alice").tempdir()?; - let file = tmp.path().join(format!("{}_mysecret", t)).to_path_buf(); - let params = NodeKeyParams { - node_key_type, - node_key: None, - node_key_file: Some(file.clone()), - }; - params.node_key(net_config_dir).and_then(|c| match c { - NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) - if node_key_type == NodeKeyType::Ed25519 && f == &file => - { - Ok(()) - } - _ => Err(error::Error::Input("Unexpected node key config".into())), - }) - }) + fn check_key(file: PathBuf, key: &ed25519::SecretKey) { + let params = NodeKeyParams { + node_key_type: NodeKeyType::Ed25519, + node_key: None, + node_key_file: Some(file), + }; + + let node_key = params.node_key(&PathBuf::from("not-used")) + .expect("Creates node key config") + .into_keypair() + .expect("Creates node key pair"); + + match node_key { + Keypair::Ed25519(ref pair) + if pair.secret().as_ref() == key.as_ref() => {} + _ => panic!("Invalid key"), + } } - assert!(secret_file(&PathBuf::from_str("x").unwrap()).is_ok()); + let tmp = tempfile::Builder::new().prefix("alice").tempdir().expect("Creates tempfile"); + let file = tmp.path().join("mysecret").to_path_buf(); + let key = ed25519::SecretKey::generate(); + + fs::write(&file, hex::encode(key.as_ref())).expect("Writes secret key"); + check_key(file.clone(), &key); + + fs::write(&file, &key).expect("Writes secret key"); + check_key(file.clone(), &key); } #[test] diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index bdbf55eb8326ac25cf2a98ded93b11777957681a..64bd88d63130bc6049c3e162024e7e27f73f72aa 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -18,7 +18,6 @@ use crate::CliConfiguration; use crate::Result; -use crate::Subcommand; use crate::SubstrateCli; use chrono::prelude::*; use futures::pin_mut; @@ -26,10 +25,8 @@ use futures::select; use futures::{future, future::FutureExt, Future}; use log::info; use sc_service::{Configuration, TaskType, TaskManager}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use sp_utils::metrics::{TOKIO_THREADS_ALIVE, TOKIO_THREADS_TOTAL}; -use std::{fmt::Debug, marker::PhantomData, str::FromStr, sync::Arc}; -use sc_client_api::{UsageProvider, BlockBackend, StorageProvider}; +use std::marker::PhantomData; #[cfg(target_family = "unix")] async fn main(func: F) -> std::result::Result<(), Box> @@ -96,7 +93,7 @@ pub fn build_runtime() -> std::result::Result( mut tokio_runtime: tokio::runtime::Runtime, future: FUT, - mut task_manager: TaskManager, + task_manager: TaskManager, ) -> Result<()> where FUT: Future> + future::Future, @@ -106,9 +103,7 @@ where pin_mut!(f); tokio_runtime.block_on(main(f)).map_err(|e| e.to_string())?; - - task_manager.terminate(); - drop(tokio_runtime); + tokio_runtime.block_on(task_manager.clean_shutdown()); Ok(()) } @@ -175,52 +170,6 @@ 
impl Runner { info!("⛓ Native runtime: {}", C::native_runtime_version(&self.config.chain_spec)); } - /// A helper function that runs a future with tokio and stops if the process receives the signal - /// `SIGTERM` or `SIGINT`. - pub fn run_subcommand(self, subcommand: &Subcommand, builder: BU) - -> Result<()> - where - BU: FnOnce(Configuration) - -> sc_service::error::Result<(Arc, Arc, IQ, TaskManager)>, - B: BlockT + for<'de> serde::Deserialize<'de>, - BA: sc_client_api::backend::Backend + 'static, - IQ: sc_service::ImportQueue + 'static, - ::Hash: FromStr, - <::Hash as FromStr>::Err: Debug, - <<::Header as HeaderT>::Number as FromStr>::Err: Debug, - CL: UsageProvider + BlockBackend + StorageProvider + Send + Sync + - 'static, - { - let chain_spec = self.config.chain_spec.cloned_box(); - let network_config = self.config.network.clone(); - let db_config = self.config.database.clone(); - - match subcommand { - Subcommand::BuildSpec(cmd) => cmd.run(chain_spec, network_config), - Subcommand::ExportBlocks(cmd) => { - let (client, _, _, task_manager) = builder(self.config)?; - run_until_exit(self.tokio_runtime, cmd.run(client, db_config), task_manager) - } - Subcommand::ImportBlocks(cmd) => { - let (client, _, import_queue, task_manager) = builder(self.config)?; - run_until_exit(self.tokio_runtime, cmd.run(client, import_queue), task_manager) - } - Subcommand::CheckBlock(cmd) => { - let (client, _, import_queue, task_manager) = builder(self.config)?; - run_until_exit(self.tokio_runtime, cmd.run(client, import_queue), task_manager) - } - Subcommand::Revert(cmd) => { - let (client, backend, _, task_manager) = builder(self.config)?; - run_until_exit(self.tokio_runtime, cmd.run(client, backend), task_manager) - }, - Subcommand::PurgeChain(cmd) => cmd.run(db_config), - Subcommand::ExportState(cmd) => { - let (client, _, _, task_manager) = builder(self.config)?; - run_until_exit(self.tokio_runtime, cmd.run(client, chain_spec), task_manager) - }, - } - } - /// A helper function that runs a node with tokio and stops if the process receives the signal /// `SIGTERM` or `SIGINT`. 
pub fn run_node_until_exit( diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 6bf60335b7bd5e54499282b10b85185a352d4971..bba3d3e1a32200ea16e71abfe2ecdcb6e6526f92 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -1,48 +1,49 @@ [package] name = "sc-consensus-aura" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "Aura consensus algorithm for substrate" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", path = "../../../primitives/application-crypto" } -sp-consensus-aura = { version = "0.8.0-rc5", path = "../../../primitives/consensus/aura" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../../primitives/block-builder" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../../client/block-builder" } -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } +sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } +sp-consensus-aura = { version = "0.8.0", path = "../../../primitives/consensus/aura" } +sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } +sc-block-builder = { version = "0.8.0", path = "../../../client/block-builder" } +sc-client-api = { version = "2.0.0", path = "../../api" } codec = { package = "parity-scale-codec", version = "1.3.4" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } derive_more = "0.99.2" futures = "0.3.4" futures-timer = "3.0.1" -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } -sc-keystore = { version = "2.0.0-rc5", path = "../../keystore" } +sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } +sc-keystore = { version = "2.0.0", path = "../../keystore" } log = "0.4.8" parking_lot = "0.10.0" -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io" } -sp-version = { version = "2.0.0-rc5", path = "../../../primitives/version" } -sc-consensus-slots = { version = "0.8.0-rc5", path = "../slots" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-timestamp = { version = "2.0.0-rc5", path = "../../../primitives/timestamp" } -sc-telemetry = { version = "2.0.0-rc5", path = "../../telemetry" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc5"} +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-io = { version = "2.0.0", path = "../../../primitives/io" } +sp-version = { version = "2.0.0", path = "../../../primitives/version" } +sc-consensus-slots = { version = "0.8.0", path = "../slots" } +sp-api = { version = "2.0.0", path = "../../../primitives/api" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-timestamp = { version = "2.0.0", path = 
"../../../primitives/timestamp" } +sc-telemetry = { version = "2.0.0", path = "../../telemetry" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0"} [dev-dependencies] -sp-keyring = { version = "2.0.0-rc5", path = "../../../primitives/keyring" } -sc-executor = { version = "0.8.0-rc5", path = "../../executor" } -sc-network = { version = "0.8.0-rc5", path = "../../network" } -sc-network-test = { version = "0.8.0-rc5", path = "../../network/test" } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../service" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } -env_logger = "0.7.0" +sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } +sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } +sc-executor = { version = "0.8.0", path = "../../executor" } +sc-network = { version = "0.8.0", path = "../../network" } +sc-network-test = { version = "0.8.0", path = "../../network/test" } +sc-service = { version = "0.8.0", default-features = false, path = "../../service" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } tempfile = "3.1.0" diff --git a/client/consensus/aura/README.md b/client/consensus/aura/README.md new file mode 100644 index 0000000000000000000000000000000000000000..85d82cd7dfd3b4e392c8210c40a32a6e57508413 --- /dev/null +++ b/client/consensus/aura/README.md @@ -0,0 +1,15 @@ +Aura (Authority-round) consensus in substrate. + +Aura works by having a list of authorities A who are expected to roughly +agree on the current time. Time is divided up into discrete slots of t +seconds each. For each slot s, the author of that slot is A[s % |A|]. + +The author is allowed to issue one block but not more during that slot, +and it will be built upon the longest valid chain that has been seen. + +Blocks from future steps will be either deferred or rejected depending on how +far in the future they are. + +NOTE: Aura itself is designed to be generic over the crypto used. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 4e6cb49f1129da7de69387b85d8d603d4600f7e9..d79caaa1d6ac5af076b31891caceecce7e6be123 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -492,14 +492,16 @@ fn check_header( } /// A verifier for Aura blocks. -pub struct AuraVerifier { +pub struct AuraVerifier { client: Arc, phantom: PhantomData
<P>
, inherent_data_providers: sp_inherents::InherentDataProviders, + can_author_with: CAW, } -impl AuraVerifier - where P: Send + Sync + 'static +impl AuraVerifier where + P: Send + Sync + 'static, + CAW: Send + Sync + 'static, { fn check_inherents( &self, @@ -507,11 +509,22 @@ impl AuraVerifier block_id: BlockId, inherent_data: InherentData, timestamp_now: u64, - ) -> Result<(), Error> - where C: ProvideRuntimeApi, C::Api: BlockBuilderApi + ) -> Result<(), Error> where + C: ProvideRuntimeApi, C::Api: BlockBuilderApi, + CAW: CanAuthorWith, { const MAX_TIMESTAMP_DRIFT_SECS: u64 = 60; + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "aura", + "Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + let inherent_res = self.client.runtime_api().check_inherents( &block_id, block, @@ -553,7 +566,7 @@ impl AuraVerifier } #[forbid(deprecated)] -impl Verifier for AuraVerifier where +impl Verifier for AuraVerifier where C: ProvideRuntimeApi + Send + Sync + @@ -565,6 +578,7 @@ impl Verifier for AuraVerifier where P: Pair + Send + Sync + 'static, P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static, P::Signature: Encode + Decode, + CAW: CanAuthorWith + Send + Sync + 'static, { fn verify( &mut self, @@ -812,7 +826,7 @@ impl BlockImport for AuraBlockImport( +pub fn import_queue( slot_duration: SlotDuration, block_import: I, justification_import: Option>, @@ -821,6 +835,7 @@ pub fn import_queue( inherent_data_providers: InherentDataProviders, spawner: &S, registry: Option<&Registry>, + can_author_with: CAW, ) -> Result, sp_consensus::Error> where B: BlockT, C::Api: BlockBuilderApi + AuraApi> + ApiExt, @@ -831,6 +846,7 @@ pub fn import_queue( P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, P::Signature: Encode + Decode, S: sp_core::traits::SpawnNamed, + CAW: CanAuthorWith + Send + Sync + 'static, { register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get())?; initialize_authorities_cache(&*client)?; @@ -839,6 +855,7 @@ pub fn import_queue( client, inherent_data_providers, phantom: PhantomData, + can_author_with, }; Ok(BasicQueue::new( @@ -854,7 +871,7 @@ pub fn import_queue( #[cfg(test)] mod tests { use super::*; - use sp_consensus::{NoNetwork as DummyOracle, Proposal, RecordProof}; + use sp_consensus::{NoNetwork as DummyOracle, Proposal, RecordProof, AlwaysCanAuthor}; use sc_network_test::{Block as TestBlock, *}; use sp_runtime::traits::{Block as BlockT, DigestFor}; use sc_network::config::ProtocolConfig; @@ -924,7 +941,7 @@ mod tests { } impl TestNetFactory for AuraTestNet { - type Verifier = AuraVerifier; + type Verifier = AuraVerifier; type PeerData = (); /// Create new test network with peers and given config. 
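The Aura README added earlier in this patch states that the author of slot s is A[s % |A|]. A minimal, self-contained sketch of that rule (not taken from this patch), assuming a stand-in `AuthorityId` type in place of the crate's generic `P::Public`:

```rust
// Stand-in authority identifier; the real crate uses the public key type of a `Pair`.
type AuthorityId = String;

/// Expected author of `slot_number`, following the README rule `A[s % |A|]`.
fn slot_author(slot_number: u64, authorities: &[AuthorityId]) -> Option<&AuthorityId> {
    if authorities.is_empty() {
        return None;
    }
    // The index wraps around the authority set, so every slot has exactly one expected author.
    let index = (slot_number % authorities.len() as u64) as usize;
    authorities.get(index)
}

fn main() {
    let authorities = vec!["alice".to_string(), "bob".to_string(), "charlie".to_string()];
    // Slot 7 with 3 authorities: 7 % 3 == 1, so the second authority is expected to author.
    assert_eq!(slot_author(7, &authorities).map(String::as_str), Some("bob"));
}
```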
@@ -951,6 +968,7 @@ mod tests { client, inherent_data_providers, phantom: Default::default(), + can_author_with: AlwaysCanAuthor, } }, PeersClient::Light(_, _) => unreachable!("No (yet) tests for light client + Aura"), @@ -973,7 +991,7 @@ mod tests { #[test] #[allow(deprecated)] fn authoring_blocks() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let net = AuraTestNet::new(3); let peers = &[ diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 1b6b705139cf5a54e350a8b0ffd4da3e00e286a1..046ba8cac30c505c19c41cbf0e11ad25af9a7b00 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-babe" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "BABE consensus algorithm for substrate" edition = "2018" @@ -8,38 +8,39 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-consensus-babe" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -sp-consensus-babe = { version = "0.8.0-rc5", path = "../../../primitives/consensus/babe" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-application-crypto = { version = "2.0.0-rc5", path = "../../../primitives/application-crypto" } +sp-consensus-babe = { version = "0.8.0", path = "../../../primitives/consensus/babe" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } num-bigint = "0.2.3" num-rational = "0.2.2" num-traits = "0.2.8" serde = { version = "1.0.104", features = ["derive"] } -sp-version = { version = "2.0.0-rc5", path = "../../../primitives/version" } -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io" } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } -sp-timestamp = { version = "2.0.0-rc5", path = "../../../primitives/timestamp" } -sc-telemetry = { version = "2.0.0-rc5", path = "../../telemetry" } -sc-keystore = { version = "2.0.0-rc5", path = "../../keystore" } -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } -sc-consensus-epochs = { version = "0.8.0-rc5", path = "../epochs" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../../primitives/block-builder" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-consensus-vrf = { version = "0.8.0-rc5", path = "../../../primitives/consensus/vrf" } -sc-consensus-uncles = { version = "0.8.0-rc5", path = "../uncles" } -sc-consensus-slots = { version = "0.8.0-rc5", path = "../slots" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc5", path = "../../../primitives/utils" } -fork-tree = { version = "2.0.0-rc5", path = "../../../utils/fork-tree" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc5"} +sp-version = { version = "2.0.0", path = "../../../primitives/version" } +sp-io = { version = "2.0.0", path = "../../../primitives/io" } 
+sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } +sp-timestamp = { version = "2.0.0", path = "../../../primitives/timestamp" } +sc-telemetry = { version = "2.0.0", path = "../../telemetry" } +sc-keystore = { version = "2.0.0", path = "../../keystore" } +sc-client-api = { version = "2.0.0", path = "../../api" } +sc-consensus-epochs = { version = "0.8.0", path = "../epochs" } +sp-api = { version = "2.0.0", path = "../../../primitives/api" } +sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sp-consensus-vrf = { version = "0.8.0", path = "../../../primitives/consensus/vrf" } +sc-consensus-uncles = { version = "0.8.0", path = "../uncles" } +sc-consensus-slots = { version = "0.8.0", path = "../slots" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-utils = { version = "2.0.0", path = "../../../primitives/utils" } +fork-tree = { version = "2.0.0", path = "../../../utils/fork-tree" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0"} futures = "0.3.4" futures-timer = "3.0.1" parking_lot = "0.10.0" @@ -52,14 +53,14 @@ derive_more = "0.99.2" retain_mut = "0.1.1" [dev-dependencies] -sp-keyring = { version = "2.0.0-rc5", path = "../../../primitives/keyring" } -sc-executor = { version = "0.8.0-rc5", path = "../../executor" } -sc-network = { version = "0.8.0-rc5", path = "../../network" } -sc-network-test = { version = "0.8.0-rc5", path = "../../network/test" } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../service" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../block-builder" } -env_logger = "0.7.0" +sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } +sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } +sc-executor = { version = "0.8.0", path = "../../executor" } +sc-network = { version = "0.8.0", path = "../../network" } +sc-network-test = { version = "0.8.0", path = "../../network/test" } +sc-service = { version = "0.8.0", default-features = false, path = "../../service" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } +sc-block-builder = { version = "0.8.0", path = "../../block-builder" } rand_chacha = "0.2.2" tempfile = "3.1.0" diff --git a/client/consensus/babe/README.md b/client/consensus/babe/README.md new file mode 100644 index 0000000000000000000000000000000000000000..faba3948ed71583bb04d4f36962ec92c806c1634 --- /dev/null +++ b/client/consensus/babe/README.md @@ -0,0 +1,48 @@ +# BABE (Blind Assignment for Blockchain Extension) + +BABE is a slot-based block production mechanism which uses a VRF PRNG to +randomly perform the slot allocation. On every slot, all the authorities +generate a new random number with the VRF function and if it is lower than a +given threshold (which is proportional to their weight/stake) they have a +right to produce a block. The proof of the VRF function execution will be +used by other peer to validate the legitimacy of the slot claim. + +The engine is also responsible for collecting entropy on-chain which will be +used to seed the given VRF PRNG. 
An epoch is a contiguous number of slots +under which we will be using the same authority set. During an epoch all VRF +outputs produced as a result of block production will be collected on an +on-chain randomness pool. Epoch changes are announced one epoch in advance, +i.e. when ending epoch N, we announce the parameters (randomness, +authorities, etc.) for epoch N+2. + +Since the slot assignment is randomized, it is possible that a slot is +assigned to multiple validators in which case we will have a temporary fork, +or that a slot is assigned to no validator in which case no block is +produced. Which means that block times are not deterministic. + +The protocol has a parameter `c` [0, 1] for which `1 - c` is the probability +of a slot being empty. The choice of this parameter affects the security of +the protocol relating to maximum tolerable network delays. + +In addition to the VRF-based slot assignment described above, which we will +call primary slots, the engine also supports a deterministic secondary slot +assignment. Primary slots take precedence over secondary slots, when +authoring the node starts by trying to claim a primary slot and falls back +to a secondary slot claim attempt. The secondary slot assignment is done +by picking the authority at index: + +`blake2_256(epoch_randomness ++ slot_number) % authorities_len`. + +The secondary slots supports either a `SecondaryPlain` or `SecondaryVRF` +variant. Comparing with `SecondaryPlain` variant, the `SecondaryVRF` variant +generates an additional VRF output. The output is not included in beacon +randomness, but can be consumed by parachains. + +The fork choice rule is weight-based, where weight equals the number of +primary blocks in the chain. We will pick the heaviest chain (more primary +blocks) and will go with the longest one in case of a tie. 
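The secondary-slot rule quoted in this README reduces a hash of the epoch randomness and slot number modulo the authority-set length. A minimal sketch of that computation, assuming `sp_core::blake2_256` for hashing and reducing only the first 16 bytes of the digest for simplicity (the actual client SCALE-encodes the inputs and reduces the full 256-bit value):

```rust
use sp_core::blake2_256;

/// Index of the expected secondary author for `slot_number`, following
/// `blake2_256(epoch_randomness ++ slot_number) % authorities_len`.
fn secondary_slot_author_index(
    epoch_randomness: &[u8; 32],
    slot_number: u64,
    authorities_len: usize,
) -> Option<usize> {
    if authorities_len == 0 {
        return None;
    }
    // Concatenate the epoch randomness with the little-endian slot number and hash it.
    let mut input = epoch_randomness.to_vec();
    input.extend_from_slice(&slot_number.to_le_bytes());
    let hash = blake2_256(&input);

    // Reduce a truncation of the hash modulo the number of authorities.
    let mut raw = [0u8; 16];
    raw.copy_from_slice(&hash[..16]);
    Some((u128::from_le_bytes(raw) % authorities_len as u128) as usize)
}
```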
+ +An in-depth description and analysis of the protocol can be found here: + + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 280b38f4d6c323d31533451c99605c34b84bb70e..9e2b64142330e5cc6766fe0c80ed972965fb90b7 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -1,38 +1,39 @@ [package] name = "sc-consensus-babe-rpc" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "RPC extensions for the BABE consensus algorithm" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-consensus-babe = { version = "0.8.0-rc5", path = "../" } -sc-rpc-api = { version = "0.8.0-rc5", path = "../../../rpc-api" } -jsonrpc-core = "14.2.0" -jsonrpc-core-client = "14.2.0" -jsonrpc-derive = "14.2.1" -sp-consensus-babe = { version = "0.8.0-rc5", path = "../../../../primitives/consensus/babe" } +sc-consensus-babe = { version = "0.8.0", path = "../" } +sc-rpc-api = { version = "0.8.0", path = "../../../rpc-api" } +jsonrpc-core = "15.0.0" +jsonrpc-core-client = "15.0.0" +jsonrpc-derive = "15.0.0" +sp-consensus-babe = { version = "0.8.0", path = "../../../../primitives/consensus/babe" } serde = { version = "1.0.104", features=["derive"] } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../../primitives/runtime" } -sc-consensus-epochs = { version = "0.8.0-rc5", path = "../../epochs" } +sp-blockchain = { version = "2.0.0", path = "../../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0", path = "../../../../primitives/runtime" } +sc-consensus-epochs = { version = "0.8.0", path = "../../epochs" } futures = { version = "0.3.4", features = ["compat"] } derive_more = "0.99.2" -sp-api = { version = "2.0.0-rc5", path = "../../../../primitives/api" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../../primitives/consensus/common" } -sp-core = { version = "2.0.0-rc5", path = "../../../../primitives/core" } -sp-application-crypto = { version = "2.0.0-rc5", path = "../../../../primitives/application-crypto" } -sc-keystore = { version = "2.0.0-rc5", path = "../../../keystore" } +sp-api = { version = "2.0.0", path = "../../../../primitives/api" } +sp-consensus = { version = "0.8.0", path = "../../../../primitives/consensus/common" } +sp-core = { version = "2.0.0", path = "../../../../primitives/core" } +sp-application-crypto = { version = "2.0.0", path = "../../../../primitives/application-crypto" } +sc-keystore = { version = "2.0.0", path = "../../../keystore" } [dev-dependencies] -sc-consensus = { version = "0.8.0-rc5", path = "../../../consensus/common" } +sc-consensus = { version = "0.8.0", path = "../../../consensus/common" } serde_json = "1.0.50" -sp-keyring = { version = "2.0.0-rc5", path = "../../../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../../test-utils/runtime/client" } +sp-keyring = { version = "2.0.0", path = "../../../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" } tempfile = "3.1.0" diff --git a/client/consensus/babe/rpc/README.md 
b/client/consensus/babe/rpc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e76dd3dc67f81d3b789e718acbb8cb8b0e755cec --- /dev/null +++ b/client/consensus/babe/rpc/README.md @@ -0,0 +1,3 @@ +RPC api for babe. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index 4f26568d833430e7fd6454f8f0e58eeb4677688a..74078b4ee7b8a401753e1c6a8a46eadf2b5258ed 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -51,7 +51,7 @@ fn load_decode(backend: &B, key: &[u8]) -> ClientResult> } /// Load or initialize persistent epoch change data from backend. -pub(crate) fn load_epoch_changes( +pub fn load_epoch_changes( backend: &B, config: &BabeGenesisConfiguration, ) -> ClientResult> { diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 951d1467b4983fcd49a7b6cf77bc85ebba2e71fc..95f1653d8646dde86ea2d5000b72b9d0b143a373 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -126,9 +126,10 @@ use schnorrkel::SignatureError; use codec::{Encode, Decode}; use sp_api::ApiExt; -mod aux_schema; mod verification; mod migration; + +pub mod aux_schema; pub mod authorship; #[cfg(test)] mod tests; @@ -787,22 +788,24 @@ impl BabeLink { } /// A verifier for Babe blocks. -pub struct BabeVerifier { +pub struct BabeVerifier { client: Arc, select_chain: SelectChain, inherent_data_providers: sp_inherents::InherentDataProviders, config: Config, epoch_changes: SharedEpochChanges, time_source: TimeSource, + can_author_with: CAW, } -impl BabeVerifier +impl BabeVerifier where Block: BlockT, Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith, { fn check_inherents( &self, @@ -810,6 +813,16 @@ where block_id: BlockId, inherent_data: InherentData, ) -> Result<(), Error> { + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "babe", + "Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + let inherent_res = self.client.runtime_api().check_inherents( &block_id, block, @@ -908,13 +921,15 @@ where } } -impl Verifier for BabeVerifier +impl Verifier + for BabeVerifier where Block: BlockT, Client: HeaderMetadata + HeaderBackend + ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache, Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith + Send + Sync, { fn verify( &mut self, @@ -980,13 +995,15 @@ where // the header is valid but let's check if there was something else already // proposed at the same slot by the given author. if there was, we will // report the equivocation to the runtime. - self.check_and_report_equivocation( + if let Err(err) = self.check_and_report_equivocation( slot_now, slot_number, &header, &verified_info.author, &origin, - )?; + ) { + warn!(target: "babe", "Error checking/reporting BABE equivocation: {:?}", err); + } // if the body is passed through, we need to use the runtime // to check that the internally-set timestamp in the inherents @@ -1035,7 +1052,7 @@ where } /// Register the babe inherent data provider, if not registered already. 
-fn register_babe_inherent_data_provider( +pub fn register_babe_inherent_data_provider( inherent_data_providers: &InherentDataProviders, slot_duration: u64, ) -> Result<(), sp_consensus::Error> { @@ -1422,7 +1439,7 @@ pub fn block_import( /// /// The block import object provided must be the `BabeBlockImport` or a wrapper /// of it, otherwise crucial import logic will be omitted. -pub fn import_queue( +pub fn import_queue( babe_link: BabeLink, block_import: Inner, justification_import: Option>, @@ -1432,6 +1449,7 @@ pub fn import_queue( inherent_data_providers: InherentDataProviders, spawner: &impl sp_core::traits::SpawnNamed, registry: Option<&Registry>, + can_author_with: CAW, ) -> ClientResult> where Inner: BlockImport> + Send + Sync + 'static, @@ -1439,6 +1457,7 @@ pub fn import_queue( Client: HeaderBackend + HeaderMetadata, Client::Api: BlockBuilderApi + BabeApi + ApiExt, SelectChain: sp_consensus::SelectChain + 'static, + CAW: CanAuthorWith + Send + Sync + 'static, { register_babe_inherent_data_provider(&inherent_data_providers, babe_link.config.slot_duration)?; @@ -1449,6 +1468,7 @@ pub fn import_queue( config: babe_link.config, epoch_changes: babe_link.epoch_changes, time_source: babe_link.time_source, + can_author_with, }; Ok(BasicQueue::new( diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 958d7845edbc678bad6d0c00bd35e8209d123de5..87876be8ae45639148478e6d4d558723b3d2dde1 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -31,7 +31,7 @@ use sp_consensus_babe::{ }; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sp_consensus::{ - NoNetwork as DummyOracle, Proposal, RecordProof, + NoNetwork as DummyOracle, Proposal, RecordProof, AlwaysCanAuthor, import_queue::{BoxBlockImport, BoxJustificationImport, BoxFinalityProofImport}, }; use sc_network_test::*; @@ -220,7 +220,7 @@ type TestSelectChain = substrate_test_runtime_client::LongestChain< >; pub struct TestVerifier { - inner: BabeVerifier, + inner: BabeVerifier, mutator: Mutator, } @@ -320,6 +320,7 @@ impl TestNetFactory for BabeTestNet { config: data.link.config.clone(), epoch_changes: data.link.epoch_changes.clone(), time_source: data.link.time_source.clone(), + can_author_with: AlwaysCanAuthor, }, mutator: MUTATOR.with(|m| m.borrow().clone()), } @@ -346,7 +347,7 @@ impl TestNetFactory for BabeTestNet { #[test] #[should_panic] fn rejects_empty_block() { - env_logger::try_init().unwrap(); + sp_tracing::try_init_simple(); let mut net = BabeTestNet::new(3); let block_builder = |builder: BlockBuilder<_, _, _>| { builder.build().unwrap().block @@ -359,7 +360,7 @@ fn rejects_empty_block() { fn run_one_test( mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static, ) { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let mutator = Arc::new(mutator) as Mutator; MUTATOR.with(|m| *m.borrow_mut() = mutator.clone()); @@ -488,7 +489,7 @@ fn rejects_missing_consensus_digests() { #[test] fn wrong_consensus_engine_id_rejected() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let sig = AuthorityPair::generate().0.sign(b""); let bad_seal: Item = DigestItem::Seal([0; 4], sig.to_vec()); assert!(bad_seal.as_babe_pre_digest().is_none()); @@ -497,14 +498,14 @@ fn wrong_consensus_engine_id_rejected() { #[test] fn malformed_pre_digest_rejected() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let bad_seal: Item = DigestItem::Seal(BABE_ENGINE_ID, [0; 64].to_vec()); 
assert!(bad_seal.as_babe_pre_digest().is_none()); } #[test] fn sig_is_not_pre_digest() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let sig = AuthorityPair::generate().0.sign(b""); let bad_seal: Item = DigestItem::Seal(BABE_ENGINE_ID, sig.to_vec()); assert!(bad_seal.as_babe_pre_digest().is_none()); @@ -513,7 +514,7 @@ fn sig_is_not_pre_digest() { #[test] fn can_author_block() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let keystore_path = tempfile::tempdir().expect("Creates keystore path"); let keystore = sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); let pair = keystore.write().insert_ephemeral_from_seed::("//Alice") @@ -820,7 +821,7 @@ fn verify_slots_are_strictly_increasing() { #[test] fn babe_transcript_generation_match() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let keystore_path = tempfile::tempdir().expect("Creates keystore path"); let keystore = sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); let pair = keystore.write().insert_ephemeral_from_seed::("//Alice") diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index a617cf77af3dd923b812000a2559b43ff62f2d51..0a8a4c43d7117c747a20f8c91e08ae1a719ee063 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -1,18 +1,19 @@ [package] name = "sc-consensus" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Collection of common consensus specific imlementations for Substrate (client)" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } +sc-client-api = { version = "2.0.0", path = "../../api" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } diff --git a/client/consensus/common/README.md b/client/consensus/common/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a6717a1d7a6df682607e9ddf3c45d7bbf6115dfc --- /dev/null +++ b/client/consensus/common/README.md @@ -0,0 +1,3 @@ +Collection of common consensus specific implementations + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml index 4a26611a7588632c6c88b0725ff86e53daeb98f4..d50ec29ed9c6e19a054d0b488e3d83bf2b13eb61 100644 --- a/client/consensus/epochs/Cargo.toml +++ b/client/consensus/epochs/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "sc-consensus-epochs" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "Generic epochs-based utilities for consensus" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" 
[package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } parking_lot = "0.10.0" -fork-tree = { version = "2.0.0-rc5", path = "../../../utils/fork-tree" } -sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0-rc5"} -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sc-client-api = { path = "../../api" , version = "2.0.0-rc5"} +fork-tree = { version = "2.0.0", path = "../../../utils/fork-tree" } +sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0"} +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sc-client-api = { path = "../../api" , version = "2.0.0"} diff --git a/client/consensus/epochs/README.md b/client/consensus/epochs/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1e74e04172c2400f41d9d3c18037132412feb299 --- /dev/null +++ b/client/consensus/epochs/README.md @@ -0,0 +1,3 @@ +Generic utilities for epoch-based consensus engines. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index ab77f355bfadeea9179cb519273a342f6b8e0d5d..0bb77d4f6cc44285ca6f7d32d8fc989e409411ed 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "sc-consensus-manual-seal" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "Manual sealing engine for Substrate" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,28 +15,35 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" futures = "0.3.4" -jsonrpc-core = "14.2.0" -jsonrpc-core-client = "14.2.0" -jsonrpc-derive = "14.2.1" +jsonrpc-core = "15.0.0" +jsonrpc-core-client = "15.0.0" +jsonrpc-derive = "15.0.0" log = "0.4.8" parking_lot = "0.10.0" serde = { version = "1.0", features=["derive"] } assert_matches = "1.3.0" -sc-client-api = { path = "../../../client/api", version = "2.0.0-rc5" } -sc-transaction-pool = { path = "../../transaction-pool", version = "2.0.0-rc5" } -sp-blockchain = { path = "../../../primitives/blockchain", version = "2.0.0-rc5" } -sp-consensus = { package = "sp-consensus", path = "../../../primitives/consensus/common", version = "0.8.0-rc5" } -sp-inherents = { path = "../../../primitives/inherents", version = "2.0.0-rc5" } -sp-runtime = { path = "../../../primitives/runtime", version = "2.0.0-rc5" } -sp-core = { path = "../../../primitives/core", version = "2.0.0-rc5" } -sp-transaction-pool = { path = "../../../primitives/transaction-pool", version = "2.0.0-rc5" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc5" } +sc-client-api = { path = "../../api", version = "2.0.0" } +sc-consensus-babe = { path = "../../consensus/babe", version = "0.8.0" } +sc-consensus-epochs = { path = "../../consensus/epochs", version = "0.8.0" } +sp-consensus-babe = { path = "../../../primitives/consensus/babe", version = "0.8.0" } +sc-keystore = { path = "../../keystore", version = "2.0.0" } + +sc-transaction-pool = { 
path = "../../transaction-pool", version = "2.0.0" } +sp-blockchain = { path = "../../../primitives/blockchain", version = "2.0.0" } +sp-consensus = { package = "sp-consensus", path = "../../../primitives/consensus/common", version = "0.8.0" } +sp-inherents = { path = "../../../primitives/inherents", version = "2.0.0" } +sp-runtime = { path = "../../../primitives/runtime", version = "2.0.0" } +sp-core = { path = "../../../primitives/core", version = "2.0.0" } +sp-api = { path = "../../../primitives/api", version = "2.0.0" } +sp-transaction-pool = { path = "../../../primitives/transaction-pool", version = "2.0.0" } +sp-timestamp = { path = "../../../primitives/timestamp", version = "2.0.0" } + +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0" } [dev-dependencies] -sc-basic-authorship = { path = "../../basic-authorship", version = "0.8.0-rc5" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client", version = "2.0.0-rc5" } -substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool", version = "2.0.0-rc5" } tokio = { version = "0.2", features = ["rt-core", "macros"] } -env_logger = "0.7.0" +sc-basic-authorship = { path = "../../basic-authorship", version = "0.8.0" } +substrate-test-runtime-client = { path = "../../../test-utils/runtime/client", version = "2.0.0" } +substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool", version = "2.0.0" } tempfile = "3.1.0" diff --git a/client/consensus/manual-seal/README.md b/client/consensus/manual-seal/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b355f8b73183cc9661b4c65d76a060d9f6cc8918 --- /dev/null +++ b/client/consensus/manual-seal/README.md @@ -0,0 +1,4 @@ +A manual sealing engine: the engine listens for rpc calls to seal blocks and create forks. +This is suitable for a testing environment. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/manual-seal/src/consensus.rs b/client/consensus/manual-seal/src/consensus.rs new file mode 100644 index 0000000000000000000000000000000000000000..7bafeb50207d486f93055c20e5bfe2649586e743 --- /dev/null +++ b/client/consensus/manual-seal/src/consensus.rs @@ -0,0 +1,44 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Extensions for manual seal to produce blocks valid for any runtime. +use super::Error; + +use sp_runtime::traits::{Block as BlockT, DigestFor}; +use sp_inherents::InherentData; +use sp_consensus::BlockImportParams; + +pub mod babe; + +/// Consensus data provider, manual seal uses this trait object for authoring blocks valid +/// for any runtime. 
+pub trait ConsensusDataProvider: Send + Sync { + /// Block import transaction type + type Transaction; + + /// Attempt to create a consensus digest. + fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result, Error>; + + /// set up the neccessary import params. + fn append_block_import( + &self, + parent: &B::Header, + params: &mut BlockImportParams, + inherents: &InherentData + ) -> Result<(), Error>; +} diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs new file mode 100644 index 0000000000000000000000000000000000000000..71dd250733ad92deaa5342be6cf5c487da9659e3 --- /dev/null +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -0,0 +1,197 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! BABE consensus data provider + +use super::ConsensusDataProvider; +use crate::Error; + +use std::{ + any::Any, + borrow::Cow, + sync::{Arc, atomic}, + time::SystemTime, +}; +use sc_client_api::AuxStore; +use sc_consensus_babe::{ + Config, Epoch, authorship, CompatibleDigestItem, BabeIntermediate, + register_babe_inherent_data_provider, INTERMEDIATE_KEY, +}; +use sc_consensus_epochs::{SharedEpochChanges, descendent_query}; +use sc_keystore::KeyStorePtr; + +use sp_api::{ProvideRuntimeApi, TransactionFor}; +use sp_blockchain::{HeaderBackend, HeaderMetadata}; +use sp_consensus::BlockImportParams; +use sp_consensus_babe::{BabeApi, inherents::BabeInherentData}; +use sp_inherents::{InherentDataProviders, InherentData, ProvideInherentData, InherentIdentifier}; +use sp_runtime::{ + traits::{DigestItemFor, DigestFor, Block as BlockT, Header as _}, + generic::Digest, +}; +use sp_timestamp::{InherentType, InherentError, INHERENT_IDENTIFIER}; + +/// Provides BABE-compatible predigests and BlockImportParams. +/// Intended for use with BABE runtimes. +pub struct BabeConsensusDataProvider { + /// shared reference to keystore + keystore: KeyStorePtr, + + /// Shared reference to the client. + client: Arc, + + /// Shared epoch changes + epoch_changes: SharedEpochChanges, + + /// BABE config, gotten from the runtime. 
+ config: Config, +} + +impl BabeConsensusDataProvider + where + B: BlockT, + C: AuxStore + ProvideRuntimeApi, + C::Api: BabeApi, +{ + pub fn new( + client: Arc, + keystore: KeyStorePtr, + provider: &InherentDataProviders, + epoch_changes: SharedEpochChanges, + ) -> Result { + let config = Config::get_or_compute(&*client)?; + let timestamp_provider = SlotTimestampProvider::new(config.slot_duration)?; + + provider.register_provider(timestamp_provider)?; + register_babe_inherent_data_provider(provider, config.slot_duration)?; + + Ok(Self { + config, + client, + keystore, + epoch_changes, + }) + } +} + +impl ConsensusDataProvider for BabeConsensusDataProvider + where + B: BlockT, + C: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, + C::Api: BabeApi, +{ + type Transaction = TransactionFor; + + fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result, Error> { + let slot_number = inherents.babe_inherent_data()?; + + let epoch_changes = self.epoch_changes.lock(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + parent.number().clone(), + slot_number, + ) + .map_err(|e| Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)))? + .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; + + let epoch = epoch_changes + .viable_epoch( + &epoch_descriptor, + |slot| Epoch::genesis(&self.config, slot), + ) + .ok_or_else(|| { + log::info!(target: "babe", "create_digest: no viable_epoch :("); + sp_consensus::Error::InvalidAuthoritiesSet + })?; + + // this is a dev node environment, we should always be able to claim a slot. + let (predigest, _) = authorship::claim_slot(slot_number, epoch.as_ref(), &self.keystore) + .ok_or_else(|| Error::StringError("failed to claim slot for authorship".into()))?; + + Ok(Digest { + logs: vec![ + as CompatibleDigestItem>::babe_pre_digest(predigest), + ], + }) + } + + fn append_block_import( + &self, + parent: &B::Header, + params: &mut BlockImportParams, + inherents: &InherentData + ) -> Result<(), Error> { + let slot_number = inherents.babe_inherent_data()?; + + let epoch_descriptor = self.epoch_changes.lock() + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + parent.number().clone(), + slot_number, + ) + .map_err(|e| Error::StringError(format!("failed to fetch epoch data: {}", e)))? + .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; + + params.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + ); + + Ok(()) + } +} + +/// Provide duration since unix epoch in millisecond for timestamp inherent. +/// Mocks the timestamp inherent to always produce the timestamp for the next babe slot. +struct SlotTimestampProvider { + time: atomic::AtomicU64, + slot_duration: u64 +} + +impl SlotTimestampProvider { + /// create a new mocked time stamp provider. + fn new(slot_duration: u64) -> Result { + let now = SystemTime::now(); + let duration = now.duration_since(SystemTime::UNIX_EPOCH) + .map_err(|err| Error::StringError(format!("{}", err)))?; + Ok(Self { + time: atomic::AtomicU64::new(duration.as_millis() as u64), + slot_duration, + }) + } +} + +impl ProvideInherentData for SlotTimestampProvider { + fn inherent_identifier(&self) -> &'static InherentIdentifier { + &INHERENT_IDENTIFIER + } + + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), sp_inherents::Error> { + // we update the time here. 
+ let duration: InherentType = self.time.fetch_add(self.slot_duration, atomic::Ordering::SeqCst); + inherent_data.put_data(INHERENT_IDENTIFIER, &duration)?; + Ok(()) + } + + fn error_to_string(&self, error: &[u8]) -> Option { + InherentError::try_from(&INHERENT_IDENTIFIER, error).map(|e| format!("{:?}", e)) + } +} \ No newline at end of file diff --git a/client/consensus/manual-seal/src/error.rs b/client/consensus/manual-seal/src/error.rs index 2411a839b027ceb51b6e927f5c09ad030571231a..e2628008c24c7c2a3c203af47c0088ea635d13b5 100644 --- a/client/consensus/manual-seal/src/error.rs +++ b/client/consensus/manual-seal/src/error.rs @@ -18,6 +18,7 @@ //! A manual sealing engine: the engine listens for rpc calls to seal blocks and create forks. //! This is suitable for a testing environment. + use sp_consensus::{Error as ConsensusError, ImportResult}; use sp_blockchain::Error as BlockchainError; use sp_inherents::Error as InherentsError; diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 36aeffd9794f084c24fc2bf86892a3e00ae62cd0..0a8ed28a27c81bb1df3d2146a8bacbab5292c1bc 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -21,8 +21,9 @@ use futures::prelude::*; use sp_consensus::{ - Environment, Proposer, ForkChoiceStrategy, BlockImportParams, BlockOrigin, SelectChain, - import_queue::{BasicQueue, CacheKeyId, Verifier, BoxBlockImport}, + Environment, Proposer, SelectChain, BlockImport, + ForkChoiceStrategy, BlockImportParams, BlockOrigin, + import_queue::{Verifier, BasicQueue, CacheKeyId, BoxBlockImport}, }; use sp_blockchain::HeaderBackend; use sp_inherents::InherentDataProviders; @@ -34,17 +35,19 @@ use prometheus_endpoint::Registry; mod error; mod finalize_block; -mod seal_new_block; +mod seal_block; + +pub mod consensus; pub mod rpc; -use self::{ - finalize_block::{finalize_block, FinalizeBlockParams}, - seal_new_block::{seal_new_block, SealBlockParams}, -}; pub use self::{ error::Error, + consensus::ConsensusDataProvider, + finalize_block::{finalize_block, FinalizeBlockParams}, + seal_block::{SealBlockParams, seal_block, MAX_PROPOSAL_DURATION}, rpc::{EngineCommand, CreatedBlock}, }; +use sp_api::{ProvideRuntimeApi, TransactionFor}; /// The verifier for the manual seal engine; instantly finalizes. struct ManualSealVerifier; @@ -87,25 +90,83 @@ pub fn import_queue( ) } +/// Params required to start the instant sealing authorship task. +pub struct ManualSealParams, A: txpool::ChainApi, SC, CS> { + /// Block import instance for well. importing blocks. + pub block_import: BI, + + /// The environment we are producing blocks for. + pub env: E, + + /// Client instance + pub client: Arc, + + /// Shared reference to the transaction pool. + pub pool: Arc>, + + /// Stream, Basically the receiving end of a channel for sending commands to + /// the authorship task. + pub commands_stream: CS, + + /// SelectChain strategy. + pub select_chain: SC, + + /// Digest provider for inclusion in blocks. + pub consensus_data_provider: Option>>>, + + /// Provider for inherents to include in blocks. + pub inherent_data_providers: InherentDataProviders, +} + +/// Params required to start the manual sealing authorship task. +pub struct InstantSealParams, A: txpool::ChainApi, SC> { + /// Block import instance for well. importing blocks. + pub block_import: BI, + + /// The environment we are producing blocks for. + pub env: E, + + /// Client instance + pub client: Arc, + + /// Shared reference to the transaction pool. 
+ pub pool: Arc>, + + /// SelectChain strategy. + pub select_chain: SC, + + /// Digest provider for inclusion in blocks. + pub consensus_data_provider: Option>>>, + + /// Provider for inherents to include in blocks. + pub inherent_data_providers: InherentDataProviders, +} + /// Creates the background authorship task for the manual seal engine. -pub async fn run_manual_seal( - mut block_import: BoxBlockImport, - mut env: E, - client: Arc, - pool: Arc>, - mut commands_stream: S, - select_chain: SC, - inherent_data_providers: InherentDataProviders, +pub async fn run_manual_seal( + ManualSealParams { + mut block_import, + mut env, + client, + pool, + mut commands_stream, + select_chain, + inherent_data_providers, + consensus_data_provider, + .. + }: ManualSealParams ) where A: txpool::ChainApi + 'static, B: BlockT + 'static, - C: HeaderBackend + Finalizer + 'static, + BI: BlockImport> + + Send + Sync + 'static, + C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, E::Error: std::fmt::Display, >::Error: std::fmt::Display, - S: Stream::Hash>> + Unpin + 'static, + CS: Stream::Hash>> + Unpin + 'static, SC: SelectChain + 'static, { while let Some(command) = commands_stream.next().await { @@ -116,7 +177,7 @@ pub async fn run_manual_seal( parent_hash, sender, } => { - seal_new_block( + seal_block( SealBlockParams { sender, parent_hash, @@ -126,6 +187,7 @@ pub async fn run_manual_seal( select_chain: &select_chain, block_import: &mut block_import, inherent_data_provider: &inherent_data_providers, + consensus_data_provider: consensus_data_provider.as_ref().map(|p| &**p), pool: pool.clone(), client: client.clone(), } @@ -149,18 +211,24 @@ pub async fn run_manual_seal( /// runs the background authorship task for the instant seal engine. /// instant-seal creates a new block for every transaction imported into /// the transaction pool. -pub async fn run_instant_seal( - block_import: BoxBlockImport, - env: E, - client: Arc, - pool: Arc>, - select_chain: SC, - inherent_data_providers: InherentDataProviders, +pub async fn run_instant_seal( + InstantSealParams { + block_import, + env, + client, + pool, + select_chain, + consensus_data_provider, + inherent_data_providers, + .. + }: InstantSealParams ) where A: txpool::ChainApi + 'static, B: BlockT + 'static, - C: HeaderBackend + Finalizer + 'static, + BI: BlockImport> + + Send + Sync + 'static, + C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, E::Error: std::fmt::Display, @@ -181,13 +249,16 @@ pub async fn run_instant_seal( }); run_manual_seal( - block_import, - env, - client, - pool, - commands_stream, - select_chain, - inherent_data_providers, + ManualSealParams { + block_import, + env, + client, + pool, + commands_stream, + select_chain, + consensus_data_provider, + inherent_data_providers, + } ).await } @@ -233,7 +304,7 @@ mod tests { // this test checks that blocks are created as soon as transactions are imported into the pool. let (sender, receiver) = futures::channel::oneshot::channel(); let mut sender = Arc::new(Some(sender)); - let stream = pool.pool().validated_pool().import_notification_stream() + let commands_stream = pool.pool().validated_pool().import_notification_stream() .map(move |_| { // we're only going to submit one tx so this fn will only be called once. 
let mut_sender = Arc::get_mut(&mut sender).unwrap(); @@ -246,13 +317,16 @@ mod tests { } }); let future = run_manual_seal( - Box::new(client.clone()), - env, - client.clone(), - pool.pool().clone(), - stream, - select_chain, - inherent_data_providers, + ManualSealParams { + block_import: client.clone(), + env, + client: client.clone(), + pool: pool.pool().clone(), + commands_stream, + select_chain, + inherent_data_providers, + consensus_data_provider: None, + } ); std::thread::spawn(|| { let mut rt = tokio::runtime::Runtime::new().unwrap(); @@ -299,15 +373,18 @@ mod tests { None, ); // this test checks that blocks are created as soon as an engine command is sent over the stream. - let (mut sink, stream) = futures::channel::mpsc::channel(1024); + let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); let future = run_manual_seal( - Box::new(client.clone()), - env, - client.clone(), - pool.pool().clone(), - stream, - select_chain, - inherent_data_providers, + ManualSealParams { + block_import: client.clone(), + env, + client: client.clone(), + pool: pool.pool().clone(), + commands_stream, + select_chain, + consensus_data_provider: None, + inherent_data_providers, + } ); std::thread::spawn(|| { let mut rt = tokio::runtime::Runtime::new().unwrap(); @@ -371,15 +448,18 @@ mod tests { None, ); // this test checks that blocks are created as soon as an engine command is sent over the stream. - let (mut sink, stream) = futures::channel::mpsc::channel(1024); + let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); let future = run_manual_seal( - Box::new(client.clone()), - env, - client.clone(), - pool.pool().clone(), - stream, - select_chain, - inherent_data_providers, + ManualSealParams { + block_import: client.clone(), + env, + client: client.clone(), + pool: pool.pool().clone(), + commands_stream, + select_chain, + consensus_data_provider: None, + inherent_data_providers, + } ); std::thread::spawn(|| { let mut rt = tokio::runtime::Runtime::new().unwrap(); diff --git a/client/consensus/manual-seal/src/rpc.rs b/client/consensus/manual-seal/src/rpc.rs index f3f0fe4a128ed86c0135ccfad5111708d7b37bbf..690b6c1eb9996d086d0a2397b00cdc2c1d5145d0 100644 --- a/client/consensus/manual-seal/src/rpc.rs +++ b/client/consensus/manual-seal/src/rpc.rs @@ -14,7 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! RPC interface for the ManualSeal Engine. +//! RPC interface for the `ManualSeal` Engine. + use sp_consensus::ImportedAux; use jsonrpc_core::Error; use jsonrpc_derive::rpc; diff --git a/client/consensus/manual-seal/src/seal_new_block.rs b/client/consensus/manual-seal/src/seal_block.rs similarity index 73% rename from client/consensus/manual-seal/src/seal_new_block.rs rename to client/consensus/manual-seal/src/seal_block.rs index c5aea11ced316bba9a329ed954a01afcd49263ff..58f017f2d41ad41396be3540828ae3c05fee5bba 100644 --- a/client/consensus/manual-seal/src/seal_new_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -16,7 +16,7 @@ //! 
Block sealing utilities -use crate::{Error, rpc}; +use crate::{Error, rpc, CreatedBlock, ConsensusDataProvider}; use std::sync::Arc; use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT}, @@ -24,24 +24,21 @@ use sp_runtime::{ }; use futures::prelude::*; use sc_transaction_pool::txpool; -use rpc::CreatedBlock; - use sp_consensus::{ - self, BlockImport, Environment, Proposer, - ForkChoiceStrategy, BlockImportParams, BlockOrigin, - ImportResult, SelectChain, - import_queue::BoxBlockImport, + self, BlockImport, Environment, Proposer, ForkChoiceStrategy, + BlockImportParams, BlockOrigin, ImportResult, SelectChain, }; use sp_blockchain::HeaderBackend; use std::collections::HashMap; use std::time::Duration; use sp_inherents::InherentDataProviders; +use sp_api::{ProvideRuntimeApi, TransactionFor}; /// max duration for creating a proposal in secs -const MAX_PROPOSAL_DURATION: u64 = 10; +pub const MAX_PROPOSAL_DURATION: u64 = 10; /// params for sealing a new block -pub struct SealBlockParams<'a, B: BlockT, SC, HB, E, T, P: txpool::ChainApi> { +pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, P: txpool::ChainApi> { /// if true, empty blocks(without extrinsics) will be created. /// otherwise, will return Error::EmptyTransactionPool. pub create_empty: bool, @@ -54,19 +51,21 @@ pub struct SealBlockParams<'a, B: BlockT, SC, HB, E, T, P: txpool::ChainApi> { /// transaction pool pub pool: Arc>, /// header backend - pub client: Arc, + pub client: Arc, /// Environment trait object for creating a proposer pub env: &'a mut E, /// SelectChain object pub select_chain: &'a SC, + /// Digest provider for inclusion in blocks. + pub consensus_data_provider: Option<&'a dyn ConsensusDataProvider>>, /// block import object - pub block_import: &'a mut BoxBlockImport, + pub block_import: &'a mut BI, /// inherent data provider pub inherent_data_provider: &'a InherentDataProviders, } /// seals a new block with the given params -pub async fn seal_new_block( +pub async fn seal_block( SealBlockParams { create_empty, finalize, @@ -77,13 +76,16 @@ pub async fn seal_new_block( block_import, env, inherent_data_provider, + consensus_data_provider: digest_provider, mut sender, .. - }: SealBlockParams<'_, B, SC, HB, E, T, P> + }: SealBlockParams<'_, B, BI, SC, C, E, P> ) where B: BlockT, - HB: HeaderBackend, + BI: BlockImport> + + Send + Sync + 'static, + C: HeaderBackend + ProvideRuntimeApi, E: Environment, >::Error: std::fmt::Display, >::Error: std::fmt::Display, @@ -98,7 +100,7 @@ pub async fn seal_new_block( // get the header to build this new block on. // use the parent_hash supplied via `EngineCommand` // or fetch the best_block. - let header = match parent_hash { + let parent = match parent_hash { Some(hash) => { match client.header(BlockId::Hash(hash))? { Some(header) => header, @@ -108,11 +110,18 @@ pub async fn seal_new_block( None => select_chain.best_chain()? }; - let proposer = env.init(&header) + let proposer = env.init(&parent) .map_err(|err| Error::StringError(format!("{}", err))).await?; let id = inherent_data_provider.create_inherent_data()?; let inherents_len = id.len(); - let proposal = proposer.propose(id, Default::default(), Duration::from_secs(MAX_PROPOSAL_DURATION), false.into()) + + let digest = if let Some(digest_provider) = digest_provider { + digest_provider.create_digest(&parent, &id)? 
+ } else { + Default::default() + }; + + let proposal = proposer.propose(id.clone(), digest, Duration::from_secs(MAX_PROPOSAL_DURATION), false.into()) .map_err(|err| Error::StringError(format!("{}", err))).await?; if proposal.block.extrinsics().len() == inherents_len && !create_empty { @@ -125,6 +134,10 @@ pub async fn seal_new_block( params.finalized = finalize; params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + if let Some(digest_provider) = digest_provider { + digest_provider.append_block_import(&parent, &mut params, &id)?; + } + match block_import.import_block(params, HashMap::new())? { ImportResult::Imported(aux) => { Ok(CreatedBlock { hash: ::Header::hash(&header), aux }) diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index b72166f9ce9b2636af3c2a3de728cd4073b4d1d4..fbb02ccc71121a1855932dffb624a765593dbbb4 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -1,29 +1,32 @@ [package] name = "sc-consensus-pow" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "PoW consensus algorithm for substrate" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../../primitives/block-builder" } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } -sp-consensus-pow = { version = "0.8.0-rc5", path = "../../../primitives/consensus/pow" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0", path = "../../../primitives/api" } +sc-client-api = { version = "2.0.0", path = "../../api" } +sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } +sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } +sp-consensus-pow = { version = "0.8.0", path = "../../../primitives/consensus/pow" } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } log = "0.4.8" futures = { version = "0.3.1", features = ["compat"] } -sp-timestamp = { version = "2.0.0-rc5", path = "../../../primitives/timestamp" } +futures-timer = "3.0.1" +parking_lot = "0.10.0" +sp-timestamp = { version = "2.0.0", path = "../../../primitives/timestamp" } derive_more = "0.99.2" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc5"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0"} diff --git a/client/consensus/pow/README.md b/client/consensus/pow/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..a335ec367047b9762cc01132da1b39e3d4d3ed0d --- /dev/null +++ b/client/consensus/pow/README.md @@ -0,0 +1,16 @@ +Proof of work consensus for Substrate. + +To use this engine, you can need to have a struct that implements +`PowAlgorithm`. After that, pass an instance of the struct, along +with other necessary client references to `import_queue` to setup +the queue. Use the `start_mine` function for basic CPU mining. + +The auxiliary storage for PoW engine only stores the total difficulty. +For other storage requirements for particular PoW algorithm (such as +the actual difficulty for each particular blocks), you can take a client +reference in your `PowAlgorithm` implementation, and use a separate prefix +for the auxiliary storage. It is also possible to just use the runtime +as the storage, but it is not recommended as it won't work well with light +clients. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 42d1bc050192c3a269f4c4c64e94d4ef70d393cf..b73b9aa91f802a0cae87e9b590f224100b5b8325 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -31,13 +31,17 @@ //! as the storage, but it is not recommended as it won't work well with light //! clients. -use std::sync::Arc; -use std::any::Any; -use std::borrow::Cow; -use std::thread; -use std::collections::HashMap; -use std::marker::PhantomData; -use sc_client_api::{BlockOf, backend::AuxStore}; +mod worker; + +pub use crate::worker::{MiningWorker, MiningMetadata, MiningBuild}; + +use std::{ + sync::Arc, any::Any, borrow::Cow, collections::HashMap, marker::PhantomData, + cmp::Ordering, time::Duration, +}; +use futures::{prelude::*, future::Either}; +use parking_lot::Mutex; +use sc_client_api::{BlockOf, backend::AuxStore, BlockchainEvents}; use sp_blockchain::{HeaderBackend, ProvideCache, well_known_cache_keys::Id as CacheKeyId}; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_runtime::{Justification, RuntimeString}; @@ -60,6 +64,8 @@ use sc_client_api; use log::*; use sp_timestamp::{InherentError as TIError, TimestampInherentData}; +use crate::worker::UntilImportedOrTimeout; + #[derive(derive_more::Display, Debug)] pub enum Error { #[display(fmt = "Header uses the wrong engine {:?}", _0)] @@ -88,10 +94,13 @@ pub enum Error { CreateInherents(sp_inherents::Error), #[display(fmt = "Checking inherents failed: {}", _0)] CheckInherents(String), + #[display(fmt = "Multiple pre-runtime digests")] + MultiplePreRuntimeDigests, Client(sp_blockchain::Error), Codec(codec::Error), Environment(String), - Runtime(RuntimeString) + Runtime(RuntimeString), + Other(String), } impl std::convert::From> for String { @@ -167,35 +176,44 @@ pub trait PowAlgorithm { ) -> Result, Error> { Ok(None) } + /// Break a fork choice tie. + /// + /// By default this chooses the earliest block seen. Using uniform tie + /// breaking algorithms will help to protect against selfish mining. + /// + /// Returns if the new seal should be considered best block. + fn break_tie( + &self, + _own_seal: &Seal, + _new_seal: &Seal, + ) -> bool { + false + } /// Verify that the difficulty is valid against given seal. fn verify( &self, parent: &BlockId, pre_hash: &B::Hash, + pre_digest: Option<&[u8]>, seal: &Seal, difficulty: Self::Difficulty, ) -> Result>; - /// Mine a seal that satisfies the given difficulty. 
- fn mine( - &self, - parent: &BlockId, - pre_hash: &B::Hash, - difficulty: Self::Difficulty, - round: u32, - ) -> Result, Error>; } /// A block importer for PoW. -pub struct PowBlockImport { +pub struct PowBlockImport { algorithm: Algorithm, inner: I, - select_chain: Option, + select_chain: S, client: Arc, inherent_data_providers: sp_inherents::InherentDataProviders, check_inherents_after: <::Header as HeaderT>::Number, + can_author_with: CAW, } -impl Clone for PowBlockImport { +impl Clone + for PowBlockImport +{ fn clone(&self) -> Self { Self { algorithm: self.algorithm.clone(), @@ -204,17 +222,19 @@ impl Clone for PowBlockImpor client: self.client.clone(), inherent_data_providers: self.inherent_data_providers.clone(), check_inherents_after: self.check_inherents_after.clone(), + can_author_with: self.can_author_with.clone(), } } } -impl PowBlockImport where +impl PowBlockImport where B: BlockT, I: BlockImport> + Send + Sync, I::Error: Into, C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + ProvideCache + BlockOf, C::Api: BlockBuilderApi, Algorithm: PowAlgorithm, + CAW: CanAuthorWith, { /// Create a new block import suitable to be used in PoW pub fn new( @@ -222,11 +242,19 @@ impl PowBlockImport where client: Arc, algorithm: Algorithm, check_inherents_after: <::Header as HeaderT>::Number, - select_chain: Option, + select_chain: S, inherent_data_providers: sp_inherents::InherentDataProviders, + can_author_with: CAW, ) -> Self { - Self { inner, client, algorithm, check_inherents_after, - select_chain, inherent_data_providers } + Self { + inner, + client, + algorithm, + check_inherents_after, + select_chain, + inherent_data_providers, + can_author_with, + } } fn check_inherents( @@ -242,6 +270,16 @@ impl PowBlockImport where return Ok(()) } + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "pow", + "Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + let inherent_res = self.client.runtime_api().check_inherents( &block_id, block, @@ -270,7 +308,7 @@ impl PowBlockImport where } } -impl BlockImport for PowBlockImport where +impl BlockImport for PowBlockImport where B: BlockT, I: BlockImport> + Send + Sync, I::Error: Into, @@ -279,6 +317,7 @@ impl BlockImport for PowBlockImport, Algorithm: PowAlgorithm, Algorithm::Difficulty: 'static, + CAW: CanAuthorWith, { type Error = ConsensusError; type Transaction = sp_api::TransactionFor; @@ -295,12 +334,9 @@ impl BlockImport for PowBlockImport, new_cache: HashMap>, ) -> Result { - let best_hash = match self.select_chain.as_ref() { - Some(select_chain) => select_chain.best_chain() - .map_err(|e| format!("Fetch best chain failed via select chain: {:?}", e))? 
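With `select_chain` now mandatory and the extra `can_author_with` argument, wiring the block import looks roughly as follows. This is a hedged sketch of node-side service code: `client`, `select_chain` and `inherent_data_providers` are assumed to exist in the surrounding setup, and `MinimalPow` is the placeholder algorithm from the previous sketch.

```rust
use sp_consensus::AlwaysCanAuthor;

let pow_block_import = sc_consensus_pow::PowBlockImport::new(
	client.clone(),                  // inner block import; the client itself works here
	client.clone(),
	MinimalPow,
	0,                               // start checking inherents at block number 0
	select_chain.clone(),            // no longer wrapped in an Option
	inherent_data_providers.clone(),
	AlwaysCanAuthor,                 // or `CanAuthorWithNativeVersion` to skip the
	                                 // inherent check when runtime versions diverge
);
```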
- .hash(), - None => self.client.info().best_hash, - }; + let best_header = self.select_chain.best_chain() + .map_err(|e| format!("Fetch best chain failed via select chain: {:?}", e))?; + let best_hash = best_header.hash(); let parent_hash = *block.header.parent_hash(); let best_aux = PowAux::read::<_, B>(self.client.as_ref(), &best_hash)?; @@ -323,16 +359,7 @@ impl BlockImport for PowBlockImport { - if id == &POW_ENGINE_ID { - seal.clone() - } else { - return Err(Error::::WrongEngine(*id).into()) - } - }, - _ => return Err(Error::::HeaderUnsealed(block.header.hash()).into()), - }; + let inner_seal = fetch_seal::(block.post_digests.last(), block.header.hash())?; let intermediate = block.take_intermediate::>( INTERMEDIATE_KEY @@ -344,9 +371,11 @@ impl BlockImport for PowBlockImport(&block.header)?; if !self.algorithm.verify( &BlockId::hash(parent_hash), &pre_hash, + pre_digest.as_ref().map(|v| &v[..]), &inner_seal, difficulty, )? { @@ -360,7 +389,18 @@ impl BlockImport for PowBlockImport best_aux.total_difficulty + match aux.total_difficulty.cmp(&best_aux.total_difficulty) { + Ordering::Less => false, + Ordering::Greater => true, + Ordering::Equal => { + let best_inner_seal = fetch_seal::( + best_header.digest().logs.last(), + best_hash, + )?; + + self.algorithm.break_tie(&best_inner_seal, &inner_seal) + }, + } )); } @@ -490,191 +530,205 @@ pub fn import_queue( )) } -/// Start the background mining thread for PoW. Note that because PoW mining -/// is CPU-intensive, it is not possible to use an async future to define this. -/// However, it's not recommended to use background threads in the rest of the -/// codebase. +/// Start the mining worker for PoW. This function provides the necessary helper functions that can +/// be used to implement a miner. However, it does not do the CPU-intensive mining itself. +/// +/// Two values are returned -- a worker, which contains functions that allows querying the current +/// mining metadata and submitting mined blocks, and a future, which must be polled to fill in +/// information in the worker. /// -/// `preruntime` is a parameter that allows a custom additional pre-runtime -/// digest to be inserted for blocks being built. This can encode authorship -/// information, or just be a graffiti. `round` is for number of rounds the -/// CPU miner runs each time. This parameter should be tweaked so that each -/// mining round is within sub-second time. -pub fn start_mine( - mut block_import: BoxBlockImport>, +/// `pre_runtime` is a parameter that allows a custom additional pre-runtime digest to be inserted +/// for blocks being built. This can encode authorship information, or just be a graffiti. 
+pub fn start_mining_worker( + block_import: BoxBlockImport>, client: Arc, + select_chain: S, algorithm: Algorithm, mut env: E, - preruntime: Option>, - round: u32, mut sync_oracle: SO, - build_time: std::time::Duration, - select_chain: Option, + pre_runtime: Option>, inherent_data_providers: sp_inherents::InherentDataProviders, + timeout: Duration, + build_time: Duration, can_author_with: CAW, -) where - C: HeaderBackend + AuxStore + ProvideRuntimeApi + 'static, - Algorithm: PowAlgorithm + Send + Sync + 'static, - E: Environment + Send + Sync + 'static, +) -> (Arc>>, impl Future) where + Block: BlockT, + C: ProvideRuntimeApi + BlockchainEvents + 'static, + S: SelectChain + 'static, + Algorithm: PowAlgorithm + Clone, + Algorithm::Difficulty: 'static, + E: Environment + Send + Sync + 'static, E::Error: std::fmt::Debug, - E::Proposer: Proposer>, - SO: SyncOracle + Send + Sync + 'static, - S: SelectChain + 'static, - CAW: CanAuthorWith + Send + 'static, + E::Proposer: Proposer>, + SO: SyncOracle + Clone + Send + Sync + 'static, + CAW: CanAuthorWith + Clone + Send + 'static, { if let Err(_) = register_pow_inherent_data_provider(&inherent_data_providers) { warn!("Registering inherent data provider for timestamp failed"); } - thread::spawn(move || { - loop { - match mine_loop( - &mut block_import, - client.as_ref(), - &algorithm, - &mut env, - preruntime.as_ref(), - round, - &mut sync_oracle, - build_time.clone(), - select_chain.as_ref(), - &inherent_data_providers, - &can_author_with, - ) { - Ok(()) => (), - Err(e) => error!( - "Mining block failed with {:?}. Sleep for 1 second before restarting...", - e - ), - } - std::thread::sleep(std::time::Duration::new(1, 0)); - } - }); -} + let timer = UntilImportedOrTimeout::new(client.import_notification_stream(), timeout); + let worker = Arc::new(Mutex::new(MiningWorker:: { + build: None, + algorithm: algorithm.clone(), + block_import, + })); + let worker_ret = worker.clone(); + + let task = timer.for_each(move |()| { + let worker = worker.clone(); -fn mine_loop( - block_import: &mut BoxBlockImport>, - client: &C, - algorithm: &Algorithm, - env: &mut E, - preruntime: Option<&Vec>, - round: u32, - sync_oracle: &mut SO, - build_time: std::time::Duration, - select_chain: Option<&S>, - inherent_data_providers: &sp_inherents::InherentDataProviders, - can_author_with: &CAW, -) -> Result<(), Error> where - C: HeaderBackend + AuxStore + ProvideRuntimeApi, - Algorithm: PowAlgorithm, - Algorithm::Difficulty: 'static, - E: Environment, - E::Proposer: Proposer>, - E::Error: std::fmt::Debug, - SO: SyncOracle, - S: SelectChain, - sp_api::TransactionFor: 'static, - CAW: CanAuthorWith, -{ - 'outer: loop { if sync_oracle.is_major_syncing() { debug!(target: "pow", "Skipping proposal due to sync."); - std::thread::sleep(std::time::Duration::new(1, 0)); - continue 'outer + worker.lock().on_major_syncing(); + return Either::Left(future::ready(())) } - let (best_hash, best_header) = match select_chain { - Some(select_chain) => { - let header = select_chain.best_chain() - .map_err(Error::BestHeaderSelectChain)?; - let hash = header.hash(); - (hash, header) - }, - None => { - let hash = client.info().best_hash; - let header = client.header(BlockId::Hash(hash)) - .map_err(Error::BestHeader)? - .ok_or(Error::NoBestHeader)?; - (hash, header) + let best_header = match select_chain.best_chain() { + Ok(x) => x, + Err(err) => { + warn!( + target: "pow", + "Unable to pull new block for authoring. 
\ + Select best chain error: {:?}", + err + ); + return Either::Left(future::ready(())) }, }; + let best_hash = best_header.hash(); if let Err(err) = can_author_with.can_author_with(&BlockId::Hash(best_hash)) { warn!( target: "pow", "Skipping proposal `can_author_with` returned: {} \ - Probably a node update is required!", + Probably a node update is required!", err, ); - std::thread::sleep(std::time::Duration::from_secs(1)); - continue 'outer + return Either::Left(future::ready(())) } - let proposer = futures::executor::block_on(env.init(&best_header)) - .map_err(|e| Error::Environment(format!("{:?}", e)))?; - - let inherent_data = inherent_data_providers - .create_inherent_data().map_err(Error::CreateInherents)?; - let mut inherent_digest = Digest::default(); - if let Some(preruntime) = &preruntime { - inherent_digest.push(DigestItem::PreRuntime(POW_ENGINE_ID, preruntime.to_vec())); + if worker.lock().best_hash() == Some(best_hash) { + return Either::Left(future::ready(())) } - let proposal = futures::executor::block_on(proposer.propose( - inherent_data, - inherent_digest, - build_time.clone(), - RecordProof::No, - )).map_err(|e| Error::BlockProposingError(format!("{:?}", e)))?; - - let (header, body) = proposal.block.deconstruct(); - let (difficulty, seal) = { - let difficulty = algorithm.difficulty(best_hash)?; - - loop { - let seal = algorithm.mine( - &BlockId::Hash(best_hash), - &header.hash(), - difficulty, - round, - )?; - - if let Some(seal) = seal { - break (difficulty, seal) - } - if best_hash != client.info().best_hash { - continue 'outer - } - } + // The worker is locked for the duration of the whole proposing period. Within this period, + // the mining target is outdated and useless anyway. + + let difficulty = match algorithm.difficulty(best_hash) { + Ok(x) => x, + Err(err) => { + warn!( + target: "pow", + "Unable to propose new block for authoring. \ + Fetch difficulty failed: {:?}", + err, + ); + return Either::Left(future::ready(())) + }, }; - log::info!("✅ Successfully mined block: {}", best_hash); - - let (hash, seal) = { - let seal = DigestItem::Seal(POW_ENGINE_ID, seal); - let mut header = header.clone(); - header.digest_mut().push(seal); - let hash = header.hash(); - let seal = header.digest_mut().pop() - .expect("Pushed one seal above; length greater than zero; qed"); - (hash, seal) + let awaiting_proposer = env.init(&best_header); + let inherent_data = match inherent_data_providers.create_inherent_data() { + Ok(x) => x, + Err(err) => { + warn!( + target: "pow", + "Unable to propose new block for authoring. \ + Creating inherent data failed: {:?}", + err, + ); + return Either::Left(future::ready(())) + }, }; + let mut inherent_digest = Digest::::default(); + if let Some(pre_runtime) = &pre_runtime { + inherent_digest.push(DigestItem::PreRuntime(POW_ENGINE_ID, pre_runtime.to_vec())); + } - let intermediate = PowIntermediate:: { - difficulty: Some(difficulty), - }; + let pre_runtime = pre_runtime.clone(); + + Either::Right(async move { + let proposer = match awaiting_proposer.await { + Ok(x) => x, + Err(err) => { + warn!( + target: "pow", + "Unable to propose new block for authoring. \ + Creating proposer failed: {:?}", + err, + ); + return + }, + }; + + let proposal = match proposer.propose( + inherent_data, + inherent_digest, + build_time.clone(), + RecordProof::No, + ).await { + Ok(x) => x, + Err(err) => { + warn!( + target: "pow", + "Unable to propose new block for authoring. 
\ + Creating proposal failed: {:?}", + err, + ); + return + }, + }; + + let build = MiningBuild:: { + metadata: MiningMetadata { + best_hash, + pre_hash: proposal.block.header().hash(), + pre_runtime: pre_runtime.clone(), + difficulty, + }, + proposal, + }; - let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); - import_block.post_digests.push(seal); - import_block.body = Some(body); - import_block.storage_changes = Some(proposal.storage_changes); - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(intermediate) as Box - ); - import_block.post_hash = Some(hash); + worker.lock().on_build(build); + }) + }); - block_import.import_block(import_block, HashMap::default()) - .map_err(|e| Error::BlockBuiltError(best_hash, e))?; + (worker_ret, task) +} + +/// Find PoW pre-runtime. +fn find_pre_digest(header: &B::Header) -> Result>, Error> { + let mut pre_digest: Option<_> = None; + for log in header.digest().logs() { + trace!(target: "pow", "Checking log {:?}, looking for pre runtime digest", log); + match (log, pre_digest.is_some()) { + (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => { + return Err(Error::MultiplePreRuntimeDigests) + }, + (DigestItem::PreRuntime(POW_ENGINE_ID, v), false) => { + pre_digest = Some(v.clone()); + }, + (_, _) => trace!(target: "pow", "Ignoring digest not meant for us"), + } + } + + Ok(pre_digest) +} + +/// Fetch PoW seal. +fn fetch_seal( + digest: Option<&DigestItem>, + hash: B::Hash, +) -> Result, Error> { + match digest { + Some(DigestItem::Seal(id, seal)) => { + if id == &POW_ENGINE_ID { + Ok(seal.clone()) + } else { + return Err(Error::::WrongEngine(*id).into()) + } + }, + _ => return Err(Error::::HeaderUnsealed(hash).into()), } } diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs new file mode 100644 index 0000000000000000000000000000000000000000..4ed863dcd9ed986e6045ac1a96061a03b030a3b7 --- /dev/null +++ b/client/consensus/pow/src/worker.rs @@ -0,0 +1,213 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::{pin::Pin, time::Duration, collections::HashMap, any::Any, borrow::Cow}; +use sc_client_api::ImportNotifications; +use sp_runtime::{DigestItem, traits::Block as BlockT, generic::BlockId}; +use sp_consensus::{Proposal, BlockOrigin, BlockImportParams, import_queue::BoxBlockImport}; +use futures::{prelude::*, task::{Context, Poll}}; +use futures_timer::Delay; +use log::*; + +use crate::{INTERMEDIATE_KEY, POW_ENGINE_ID, Seal, PowAlgorithm, PowIntermediate}; + +/// Mining metadata. This is the information needed to start an actual mining loop. +#[derive(Clone, Eq, PartialEq)] +pub struct MiningMetadata { + /// Currently known best hash which the pre-hash is built on. + pub best_hash: H, + /// Mining pre-hash. 
+ pub pre_hash: H, + /// Pre-runtime digest item. + pub pre_runtime: Option>, + /// Mining target difficulty. + pub difficulty: D, +} + +/// A build of mining, containing the metadata and the block proposal. +pub struct MiningBuild, C: sp_api::ProvideRuntimeApi> { + /// Mining metadata. + pub metadata: MiningMetadata, + /// Mining proposal. + pub proposal: Proposal>, +} + +/// Mining worker that exposes structs to query the current mining build and submit mined blocks. +pub struct MiningWorker, C: sp_api::ProvideRuntimeApi> { + pub(crate) build: Option>, + pub(crate) algorithm: Algorithm, + pub(crate) block_import: BoxBlockImport>, +} + +impl MiningWorker where + Block: BlockT, + C: sp_api::ProvideRuntimeApi, + Algorithm: PowAlgorithm, + Algorithm::Difficulty: 'static, +{ + /// Get the current best hash. `None` if the worker has just started or the client is doing + /// major syncing. + pub fn best_hash(&self) -> Option { + self.build.as_ref().map(|b| b.metadata.best_hash) + } + + pub(crate) fn on_major_syncing(&mut self) { + self.build = None; + } + + pub(crate) fn on_build( + &mut self, + build: MiningBuild, + ) { + self.build = Some(build); + } + + /// Get a copy of the current mining metadata, if available. + pub fn metadata(&self) -> Option> { + self.build.as_ref().map(|b| b.metadata.clone()) + } + + /// Submit a mined seal. The seal will be validated again. Returns true if the submission is + /// successful. + pub fn submit(&mut self, seal: Seal) -> bool { + if let Some(build) = self.build.take() { + match self.algorithm.verify( + &BlockId::Hash(build.metadata.best_hash), + &build.metadata.pre_hash, + build.metadata.pre_runtime.as_ref().map(|v| &v[..]), + &seal, + build.metadata.difficulty, + ) { + Ok(true) => (), + Ok(false) => { + warn!( + target: "pow", + "Unable to import mined block: seal is invalid", + ); + return false + }, + Err(err) => { + warn!( + target: "pow", + "Unable to import mined block: {:?}", + err, + ); + return false + }, + } + + let seal = DigestItem::Seal(POW_ENGINE_ID, seal); + let (header, body) = build.proposal.block.deconstruct(); + + let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); + import_block.post_digests.push(seal); + import_block.body = Some(body); + import_block.storage_changes = Some(build.proposal.storage_changes); + + let intermediate = PowIntermediate:: { + difficulty: Some(build.metadata.difficulty), + }; + + import_block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(intermediate) as Box + ); + + match self.block_import.import_block(import_block, HashMap::default()) { + Ok(_) => { + info!( + target: "pow", + "✅ Successfully mined block on top of: {}", + build.metadata.best_hash + ); + true + }, + Err(err) => { + warn!( + target: "pow", + "Unable to import mined block: {:?}", + err, + ); + false + }, + } + } else { + warn!( + target: "pow", + "Unable to import mined block: build does not exist", + ); + false + } + } +} + +/// A stream that waits for a block import or timeout. +pub struct UntilImportedOrTimeout { + import_notifications: ImportNotifications, + timeout: Duration, + inner_delay: Option, +} + +impl UntilImportedOrTimeout { + /// Create a new stream using the given import notification and timeout duration. 
+ pub fn new( + import_notifications: ImportNotifications, + timeout: Duration, + ) -> Self { + Self { + import_notifications, + timeout, + inner_delay: None, + } + } +} + +impl Stream for UntilImportedOrTimeout { + type Item = (); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut fire = false; + + loop { + match Stream::poll_next(Pin::new(&mut self.import_notifications), cx) { + Poll::Pending => break, + Poll::Ready(Some(_)) => { + fire = true; + }, + Poll::Ready(None) => return Poll::Ready(None), + } + } + + let timeout = self.timeout.clone(); + let inner_delay = self.inner_delay.get_or_insert_with(|| Delay::new(timeout)); + + match Future::poll(Pin::new(inner_delay), cx) { + Poll::Pending => (), + Poll::Ready(()) => { + fire = true; + }, + } + + if fire { + self.inner_delay = None; + Poll::Ready(Some(())) + } else { + Poll::Pending + } + } +} diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 9fe82d85053650b449359eb8da9a63b3e03e55e5..3a636360e795da042e95c6d38bfdb92e16dfe462 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-slots" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "Generic slots-based utilities for consensus" edition = "2018" @@ -8,27 +8,28 @@ build = "build.rs" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-application-crypto = { version = "2.0.0-rc5", path = "../../../primitives/application-crypto" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-consensus-slots = { version = "0.8.0-rc5", path = "../../../primitives/consensus/slots" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-machine" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sc-telemetry = { version = "2.0.0-rc5", path = "../../telemetry" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } +sc-client-api = { version = "2.0.0", path = "../../api" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-consensus-slots = { version = "0.8.0", path = "../../../primitives/consensus/slots" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } +sp-api = { version = "2.0.0", path = "../../../primitives/api" } +sc-telemetry = { version = "2.0.0", path = "../../telemetry" } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } futures = "0.3.4" futures-timer = "3.0.1" parking_lot = "0.10.0" log = "0.4.8" [dev-dependencies] 
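Putting the pieces above together, a node calls `start_mining_worker`, spawns the returned future, and drives the CPU-intensive search itself, feeding seals back through `MiningWorker::submit`. A hedged sketch only: `proposer_factory`, `network` (used as the `SyncOracle`), `task_manager` and `try_round` stand in for node-specific code and are not provided by this crate.

```rust
use std::time::Duration;

let (worker, worker_task) = sc_consensus_pow::start_mining_worker(
	Box::new(pow_block_import),
	client.clone(),
	select_chain,
	MinimalPow,
	proposer_factory,
	network.clone(),                 // SyncOracle
	None,                            // optional pre-runtime digest, e.g. an author id
	inherent_data_providers,
	Duration::from_secs(10),         // build a new proposal on import or after this timeout
	Duration::from_secs(10),         // maximum proposal build time
	sp_consensus::AlwaysCanAuthor,
);

// The returned future keeps the worker supplied with fresh mining builds; it must be polled.
task_manager.spawn_essential_handle().spawn_blocking("pow-mining", worker_task);

// A plain OS thread does the hashing and submits any valid seal it finds.
std::thread::spawn(move || loop {
	let metadata = match worker.lock().metadata() {
		Some(metadata) => metadata,
		None => {
			// Worker has no build yet (startup or major sync); try again shortly.
			std::thread::sleep(Duration::from_secs(1));
			continue
		},
	};
	// `try_round` is a hypothetical helper performing one bounded batch of hashing.
	if let Some(seal) = try_round(&metadata.pre_hash, metadata.difficulty) {
		worker.lock().submit(seal);
	}
});
```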
-substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/consensus/slots/README.md b/client/consensus/slots/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9ab3c3742f33076b46be61efbf3c3e5fd824b50c --- /dev/null +++ b/client/consensus/slots/README.md @@ -0,0 +1,7 @@ +Slots functionality for Substrate. + +Some consensus algorithms have a concept of *slots*, which are intervals in +time during which certain events can and/or must occur. This crate +provides generic functionality for slots. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/uncles/Cargo.toml b/client/consensus/uncles/Cargo.toml index 757daeea55170f4196525ec392d1b8031fcaef01..bb23c829a6e094f2f7a401fce8eb50ca00dd5179 100644 --- a/client/consensus/uncles/Cargo.toml +++ b/client/consensus/uncles/Cargo.toml @@ -1,21 +1,22 @@ [package] name = "sc-consensus-uncles" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "Generic uncle inclusion utilities for consensus" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-authorship = { version = "2.0.0-rc5", path = "../../../primitives/authorship" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0-rc5", path = "../../../primitives/inherents" } +sc-client-api = { version = "2.0.0", path = "../../api" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-authorship = { version = "2.0.0", path = "../../../primitives/authorship" } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } log = "0.4.8" diff --git a/client/consensus/uncles/README.md b/client/consensus/uncles/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1b6fed5b9772a69eef4abcd2997167e13e290a32 --- /dev/null +++ b/client/consensus/uncles/README.md @@ -0,0 +1,3 @@ +Uncles functionality for Substrate. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 50e14fcaae602a61f6cc56b530d0433d4e74d84e..70a0b19532593cb689c14320d7f44eb5906cf3d0 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "sc-client-db" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Client backend that uses RocksDB database as storage." 
+readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] parking_lot = "0.10.0" log = "0.4.8" kvdb = "0.7.0" -kvdb-rocksdb = { version = "0.9", optional = true } +kvdb-rocksdb = { version = "0.9.1", optional = true } kvdb-memorydb = "0.7.0" linked-hash-map = "0.5.2" hash-db = "0.15.2" @@ -23,26 +24,26 @@ parity-util-mem = { version = "0.7.0", default-features = false, features = ["st codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } blake2-rfc = "0.2.18" -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-arithmetic = { version = "2.0.0-rc5", path = "../../primitives/arithmetic" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } -sc-executor = { version = "0.8.0-rc5", path = "../executor" } -sc-state-db = { version = "0.8.0-rc5", path = "../state-db" } -sp-trie = { version = "2.0.0-rc5", path = "../../primitives/trie" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-database = { version = "2.0.0-rc5", path = "../../primitives/database" } +sc-client-api = { version = "2.0.0", path = "../api" } +sp-arithmetic = { version = "2.0.0", path = "../../primitives/arithmetic" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } +sc-executor = { version = "0.8.0", path = "../executor" } +sc-state-db = { version = "0.8.0", path = "../state-db" } +sp-trie = { version = "2.0.0", path = "../../primitives/trie" } +sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } +sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sp-database = { version = "2.0.0", path = "../../primitives/database" } parity-db = { version = "0.1.2", optional = true } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc5", path = "../../utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0", path = "../../utils/prometheus" } [dev-dependencies] -sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } -env_logger = "0.7.0" +sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } +sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } quickcheck = "0.9" -kvdb-rocksdb = "0.9" +kvdb-rocksdb = "0.9.1" tempfile = "3" [features] @@ -50,4 +51,3 @@ default = [] test-helpers = [] with-kvdb-rocksdb = ["kvdb-rocksdb"] with-parity-db = ["parity-db"] -with-subdb = [] diff --git a/client/db/README.md b/client/db/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e5fb3fce1d9763d92554b3d0c6262e4e3cfccd66 --- /dev/null +++ b/client/db/README.md @@ -0,0 +1,11 @@ +Client backend that is backed by a database. + +# Canonicality vs. 
Finality + +Finality indicates that a block will not be reverted, according to the consensus algorithm, +while canonicality indicates that the block may be reverted, but we will be unable to do so, +having discarded heavy state that will allow a chain reorganization. + +Finality implies canonicality but not vice-versa. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index c3bed3e24f617477e838ae271b2452162c3cbad3..f3c8f1aff9e14fc49613a7a5851dc675f8a5468f 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -24,10 +24,15 @@ use std::collections::HashMap; use hash_db::{Prefix, Hasher}; use sp_trie::{MemoryDB, prefixed_key}; -use sp_core::{storage::ChildInfo, hexdisplay::HexDisplay}; +use sp_core::{ + storage::{ChildInfo, TrackedStorageKey}, + hexdisplay::HexDisplay +}; use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_runtime::Storage; -use sp_state_machine::{DBValue, backend::Backend as StateBackend, StorageCollection}; +use sp_state_machine::{ + DBValue, backend::Backend as StateBackend, StorageCollection, ChildStorageCollection +}; use kvdb::{KeyValueDB, DBTransaction}; use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; @@ -93,9 +98,13 @@ pub struct BenchmarkingState { genesis: HashMap, (Vec, i32)>, record: Cell>>, shared_cache: SharedCache, // shared cache is always empty - key_tracker: RefCell, KeyTracker>>, + /// Key tracker for keys in the main trie. + main_key_tracker: RefCell, KeyTracker>>, + /// Key tracker for keys in a child trie. + /// Child trie are identified by their storage key (i.e. `ChildInfo::storage_key()`) + child_key_tracker: RefCell, HashMap, KeyTracker>>>, read_write_tracker: RefCell, - whitelist: RefCell>>, + whitelist: RefCell>, } impl BenchmarkingState { @@ -113,7 +122,8 @@ impl BenchmarkingState { genesis_root: Default::default(), record: Default::default(), shared_cache: new_shared_cache(0, (1, 10)), - key_tracker: Default::default(), + main_key_tracker: Default::default(), + child_key_tracker: Default::default(), read_write_tracker: Default::default(), whitelist: Default::default(), }; @@ -131,7 +141,7 @@ impl BenchmarkingState { ); state.genesis = transaction.clone().drain(); state.genesis_root = root.clone(); - state.commit(root, transaction, Vec::new())?; + state.commit(root, transaction, Vec::new(), Vec::new())?; state.record.take(); Ok(state) } @@ -153,62 +163,87 @@ impl BenchmarkingState { } fn add_whitelist_to_tracker(&self) { - let mut key_tracker = self.key_tracker.borrow_mut(); - - let whitelisted = KeyTracker { - has_been_read: true, - has_been_written: true, - }; + let mut main_key_tracker = self.main_key_tracker.borrow_mut(); let whitelist = self.whitelist.borrow(); whitelist.iter().for_each(|key| { - key_tracker.insert(key.to_vec(), whitelisted); + let whitelisted = KeyTracker { + has_been_read: key.has_been_read, + has_been_written: key.has_been_written, + }; + main_key_tracker.insert(key.key.clone(), whitelisted); }); } fn wipe_tracker(&self) { - *self.key_tracker.borrow_mut() = HashMap::new(); + *self.main_key_tracker.borrow_mut() = HashMap::new(); + *self.child_key_tracker.borrow_mut() = HashMap::new(); self.add_whitelist_to_tracker(); *self.read_write_tracker.borrow_mut() = Default::default(); } - fn add_read_key(&self, key: &[u8]) { - log::trace!(target: "benchmark", "Read: {}", HexDisplay::from(&key)); - - let mut key_tracker = self.key_tracker.borrow_mut(); + // Childtrie is identified by its 
storage key (i.e. `ChildInfo::storage_key`) + fn add_read_key(&self, childtrie: Option<&[u8]>, key: &[u8]) { let mut read_write_tracker = self.read_write_tracker.borrow_mut(); + let mut child_key_tracker = self.child_key_tracker.borrow_mut(); + let mut main_key_tracker = self.main_key_tracker.borrow_mut(); - let maybe_tracker = key_tracker.get(key); - - let has_been_read = KeyTracker { - has_been_read: true, - has_been_written: false, + let key_tracker = if let Some(childtrie) = childtrie { + child_key_tracker.entry(childtrie.to_vec()).or_insert_with(|| HashMap::new()) + } else { + &mut main_key_tracker }; - match maybe_tracker { + let read = match key_tracker.get(key) { None => { + let has_been_read = KeyTracker { + has_been_read: true, + has_been_written: false, + }; key_tracker.insert(key.to_vec(), has_been_read); read_write_tracker.add_read(); + true }, Some(tracker) => { if !tracker.has_been_read { + let has_been_read = KeyTracker { + has_been_read: true, + has_been_written: tracker.has_been_written, + }; key_tracker.insert(key.to_vec(), has_been_read); read_write_tracker.add_read(); + true } else { read_write_tracker.add_repeat_read(); + false } } + }; + + if read { + if let Some(childtrie) = childtrie { + log::trace!( + target: "benchmark", + "Childtrie Read: {} {}", HexDisplay::from(&childtrie), HexDisplay::from(&key) + ); + } else { + log::trace!(target: "benchmark", "Read: {}", HexDisplay::from(&key)); + } } } - fn add_write_key(&self, key: &[u8]) { - log::trace!(target: "benchmark", "Write: {}", HexDisplay::from(&key)); - - let mut key_tracker = self.key_tracker.borrow_mut(); + // Childtrie is identified by its storage key (i.e. `ChildInfo::storage_key`) + fn add_write_key(&self, childtrie: Option<&[u8]>, key: &[u8]) { let mut read_write_tracker = self.read_write_tracker.borrow_mut(); + let mut child_key_tracker = self.child_key_tracker.borrow_mut(); + let mut main_key_tracker = self.main_key_tracker.borrow_mut(); - let maybe_tracker = key_tracker.get(key); + let key_tracker = if let Some(childtrie) = childtrie { + child_key_tracker.entry(childtrie.to_vec()).or_insert_with(|| HashMap::new()) + } else { + &mut main_key_tracker + }; // If we have written to the key, we also consider that we have read from it. 
let has_been_written = KeyTracker { @@ -216,19 +251,33 @@ impl BenchmarkingState { has_been_written: true, }; - match maybe_tracker { + let write = match key_tracker.get(key) { None => { key_tracker.insert(key.to_vec(), has_been_written); read_write_tracker.add_write(); + true }, Some(tracker) => { if !tracker.has_been_written { key_tracker.insert(key.to_vec(), has_been_written); read_write_tracker.add_write(); + true } else { read_write_tracker.add_repeat_write(); + false } } + }; + + if write { + if let Some(childtrie) = childtrie { + log::trace!( + target: "benchmark", + "Childtrie Write: {} {}", HexDisplay::from(&childtrie), HexDisplay::from(&key) + ); + } else { + log::trace!(target: "benchmark", "Write: {}", HexDisplay::from(&key)); + } } } } @@ -243,12 +292,12 @@ impl StateBackend> for BenchmarkingState { type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.add_read_key(key); + self.add_read_key(None, key); self.state.borrow().as_ref().ok_or_else(state_err)?.storage(key) } fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.add_read_key(key); + self.add_read_key(None, key); self.state.borrow().as_ref().ok_or_else(state_err)?.storage_hash(key) } @@ -257,12 +306,12 @@ impl StateBackend> for BenchmarkingState { child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.add_read_key(key); + self.add_read_key(Some(child_info.storage_key()), key); self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(child_info, key) } fn exists_storage(&self, key: &[u8]) -> Result { - self.add_read_key(key); + self.add_read_key(None, key); self.state.borrow().as_ref().ok_or_else(state_err)?.exists_storage(key) } @@ -271,12 +320,12 @@ impl StateBackend> for BenchmarkingState { child_info: &ChildInfo, key: &[u8], ) -> Result { - self.add_read_key(key); + self.add_read_key(Some(child_info.storage_key()), key); self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.add_read_key(key); + self.add_read_key(None, key); self.state.borrow().as_ref().ok_or_else(state_err)?.next_storage_key(key) } @@ -285,7 +334,7 @@ impl StateBackend> for BenchmarkingState { child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.add_read_key(key); + self.add_read_key(Some(child_info.storage_key()), key); self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(child_info, key) } @@ -362,9 +411,9 @@ impl StateBackend> for BenchmarkingState { fn commit(&self, storage_root: as Hasher>::Out, mut transaction: Self::Transaction, - storage_changes: StorageCollection, - ) -> Result<(), Self::Error> - { + main_storage_changes: StorageCollection, + child_storage_changes: ChildStorageCollection, + ) -> Result<(), Self::Error> { if let Some(db) = self.db.take() { let mut db_transaction = DBTransaction::new(); let changes = transaction.drain(); @@ -385,8 +434,13 @@ impl StateBackend> for BenchmarkingState { self.db.set(Some(db)); // Track DB Writes - storage_changes.iter().for_each(|(key, _)| { - self.add_write_key(key); + main_storage_changes.iter().for_each(|(key, _)| { + self.add_write_key(None, key); + }); + child_storage_changes.iter().for_each(|(child_storage_key, storage_changes)| { + storage_changes.iter().for_each(|(key, _)| { + self.add_write_key(Some(child_storage_key), key); + }) }); } else { return Err("Trying to commit to a closed db".into()) @@ -426,7 +480,11 @@ impl 
StateBackend> for BenchmarkingState { self.wipe_tracker() } - fn set_whitelist(&self, new: Vec>) { + fn get_whitelist(&self) -> Vec { + self.whitelist.borrow().to_vec() + } + + fn set_whitelist(&self, new: Vec) { *self.whitelist.borrow_mut() = new; } @@ -444,3 +502,47 @@ impl std::fmt::Debug for BenchmarkingState { write!(f, "Bench DB") } } + +#[cfg(test)] +mod test { + use crate::bench::BenchmarkingState; + use sp_state_machine::backend::Backend as _; + + #[test] + fn read_to_main_and_child_tries() { + let bench_state = BenchmarkingState::::new(Default::default(), None) + .unwrap(); + + for _ in 0..2 { + let child1 = sp_core::storage::ChildInfo::new_default(b"child1"); + let child2 = sp_core::storage::ChildInfo::new_default(b"child2"); + + bench_state.storage(b"foo").unwrap(); + bench_state.child_storage(&child1, b"foo").unwrap(); + bench_state.child_storage(&child2, b"foo").unwrap(); + + bench_state.storage(b"bar").unwrap(); + bench_state.child_storage(&child1, b"bar").unwrap(); + bench_state.child_storage(&child2, b"bar").unwrap(); + + bench_state.commit( + Default::default(), + Default::default(), + vec![ + ("foo".as_bytes().to_vec(), None) + ], + vec![ + ("child1".as_bytes().to_vec(), vec![("foo".as_bytes().to_vec(), None)]) + ] + ).unwrap(); + + let rw_tracker = bench_state.read_write_tracker.borrow(); + assert_eq!(rw_tracker.reads, 6); + assert_eq!(rw_tracker.repeat_reads, 0); + assert_eq!(rw_tracker.writes, 2); + assert_eq!(rw_tracker.repeat_writes, 0); + drop(rw_tracker); + bench_state.wipe().unwrap(); + } + } +} diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index d854c80bf35351e6618796d7ac12ada24dddd766..8196a750557a8484ce72fc906b81a9301b1fe808 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -44,8 +44,6 @@ mod utils; mod stats; #[cfg(feature = "with-parity-db")] mod parity_db; -#[cfg(feature = "with-subdb")] -mod subdb; use std::sync::Arc; use std::path::{Path, PathBuf}; @@ -54,8 +52,8 @@ use std::collections::{HashMap, HashSet}; use sc_client_api::{ UsageInfo, MemoryInfo, IoInfo, MemorySize, - backend::{NewBlockState, PrunableStateChangesTrieStorage}, - leaves::{LeafSet, FinalizationDisplaced}, + backend::{NewBlockState, PrunableStateChangesTrieStorage, ProvideChtRoots}, + leaves::{LeafSet, FinalizationDisplaced}, cht, }; use sp_blockchain::{ Result as ClientResult, Error as ClientError, @@ -70,7 +68,7 @@ use sp_core::ChangesTrieConfiguration; use sp_core::offchain::storage::{OffchainOverlayedChange, OffchainOverlayedChanges}; use sp_core::storage::{well_known_keys, ChildInfo}; use sp_arithmetic::traits::Saturating; -use sp_runtime::{generic::BlockId, Justification, Storage}; +use sp_runtime::{generic::{DigestItem, BlockId}, Justification, Storage}; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, NumberFor, Zero, One, SaturatedConversion, HashFor, }; @@ -287,12 +285,6 @@ pub enum DatabaseSettingsSrc { path: PathBuf, }, - /// Load a Subdb database from a given path. - SubDb { - /// Path to the database. - path: PathBuf, - }, - /// Use a custom already-open database. Custom(Arc>), } @@ -303,7 +295,6 @@ impl DatabaseSettingsSrc { match self { DatabaseSettingsSrc::RocksDb { path, .. } => Some(path.as_path()), DatabaseSettingsSrc::ParityDb { path, .. } => Some(path.as_path()), - DatabaseSettingsSrc::SubDb { path, .. } => Some(path.as_path()), DatabaseSettingsSrc::Custom(_) => None, } } @@ -321,7 +312,6 @@ impl std::fmt::Display for DatabaseSettingsSrc { let name = match self { DatabaseSettingsSrc::RocksDb { .. 
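The benchmarking whitelist above now carries `TrackedStorageKey` entries with per-key read/write flags instead of bare key bytes. A small sketch of how a caller (for example the benchmarking CLI) might populate it against a `BenchmarkingState`; the concrete key is illustrative only.

```rust
use sp_core::storage::TrackedStorageKey;

// Exclude both reads and writes of this (illustrative) key from the DB access counts.
state.set_whitelist(vec![TrackedStorageKey {
	key: b":extrinsic_index".to_vec(),
	has_been_read: true,
	has_been_written: true,
}]);
assert_eq!(state.get_whitelist().len(), 1);
```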
} => "RocksDb", DatabaseSettingsSrc::ParityDb { .. } => "ParityDb", - DatabaseSettingsSrc::SubDb { .. } => "SubDb", DatabaseSettingsSrc::Custom(_) => "Custom", }; write!(f, "{}", name) @@ -405,6 +395,14 @@ impl BlockchainDb { meta.finalized_hash = hash; } } + + // Get block changes trie root, if available. + fn changes_trie_root(&self, block: BlockId) -> ClientResult> { + self.header(block) + .map(|header| header.and_then(|header| + header.digest().log(DigestItem::as_changes_trie_root) + .cloned())) + } } impl sc_client_api::blockchain::HeaderBackend for BlockchainDb { @@ -525,6 +523,58 @@ impl HeaderMetadata for BlockchainDb { } } +impl ProvideChtRoots for BlockchainDb { + fn header_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> sp_blockchain::Result> { + let cht_number = match cht::block_to_cht_number(cht_size, block) { + Some(number) => number, + None => return Ok(None), + }; + + let cht_start: NumberFor = cht::start_number(cht::size(), cht_number); + + let mut current_num = cht_start; + let cht_range = ::std::iter::from_fn(|| { + let old_current_num = current_num; + current_num = current_num + One::one(); + Some(old_current_num) + }); + + cht::compute_root::, _>( + cht::size(), cht_number, cht_range.map(|num| self.hash(num)) + ).map(Some) + } + + fn changes_trie_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> sp_blockchain::Result> { + let cht_number = match cht::block_to_cht_number(cht_size, block) { + Some(number) => number, + None => return Ok(None), + }; + + let cht_start: NumberFor = cht::start_number(cht::size(), cht_number); + + let mut current_num = cht_start; + let cht_range = ::std::iter::from_fn(|| { + let old_current_num = current_num; + current_num = current_num + One::one(); + Some(old_current_num) + }); + + cht::compute_root::, _>( + cht::size(), + cht_number, + cht_range.map(|num| self.changes_trie_root(BlockId::Number(num))), + ).map(Some) + } +} + /// Database transaction pub struct BlockImportOperation { old_state: SyncingCachingState, Block>, @@ -1908,7 +1958,7 @@ pub(crate) mod tests { #[test] fn delete_only_when_negative_rc() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let key; let backend = Backend::::new_test(1, 0); @@ -2329,4 +2379,29 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap_err(); } } + + #[test] + fn header_cht_root_works() { + use sc_client_api::ProvideChtRoots; + + let backend = Backend::::new_test(10, 10); + + // insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created + let mut prev_hash = insert_header(&backend, 0, Default::default(), None, Default::default()); + let cht_size: u64 = cht::size(); + for i in 1..1 + cht_size + cht_size + 1 { + prev_hash = insert_header(&backend, i, prev_hash, None, Default::default()); + } + + let blockchain = backend.blockchain(); + + let cht_root_1 = blockchain.header_cht_root(cht_size, cht::start_number(cht_size, 0)) + .unwrap().unwrap(); + let cht_root_2 = blockchain.header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) + .unwrap().unwrap(); + let cht_root_3 = blockchain.header_cht_root(cht_size, cht::end_number(cht_size, 0)) + .unwrap().unwrap(); + assert_eq!(cht_root_1, cht_root_2); + assert_eq!(cht_root_2, cht_root_3); + } } diff --git a/client/db/src/light.rs b/client/db/src/light.rs index 139ecf3b22c72810888dbf7f72ffb6592eabdbfe..acfb6217ce9e034dd66324e34642aa78eb0a8a95 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -23,11 +23,11 @@ use std::convert::TryInto; use parking_lot::RwLock; 
use sc_client_api::{ - cht, backend::{AuxStore, NewBlockState}, UsageInfo, + cht, backend::{AuxStore, NewBlockState, ProvideChtRoots}, UsageInfo, blockchain::{ BlockStatus, Cache as BlockchainCache, Info as BlockchainInfo, }, - Storage + Storage, }; use sp_blockchain::{ CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache, @@ -523,22 +523,6 @@ impl Storage for LightStorage } } - fn header_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult> { - self.read_cht_root(HEADER_CHT_PREFIX, cht_size, block) - } - - fn changes_trie_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult> { - self.read_cht_root(CHANGES_TRIE_CHT_PREFIX, cht_size, block) - } - fn finalize_header(&self, id: BlockId) -> ClientResult<()> { if let Some(header) = self.header(id)? { let mut transaction = Transaction::new(); @@ -612,6 +596,26 @@ impl Storage for LightStorage } } +impl ProvideChtRoots for LightStorage + where Block: BlockT, +{ + fn header_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> ClientResult> { + self.read_cht_root(HEADER_CHT_PREFIX, cht_size, block) + } + + fn changes_trie_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> ClientResult> { + self.read_cht_root(CHANGES_TRIE_CHT_PREFIX, cht_size, block) + } +} + /// Build the key for inserting header-CHT at given block. fn cht_key>(cht_type: u8, block: N) -> ClientResult<[u8; 5]> { let mut key = [cht_type; 5]; diff --git a/client/db/src/parity_db.rs b/client/db/src/parity_db.rs index 7085aa3bf8ccde1495627340352e24d4f9992a14..313069706f33f5128004ada17d58ab241855227f 100644 --- a/client/db/src/parity_db.rs +++ b/client/db/src/parity_db.rs @@ -18,7 +18,7 @@ /// A `Database` adapter for parity-db. use sp_database::{Database, Change, ColumnId, Transaction, error::DatabaseError}; -use crate::utils::NUM_COLUMNS; +use crate::utils::{DatabaseType, NUM_COLUMNS}; use crate::columns; struct DbAdapter(parity_db::Db); @@ -32,13 +32,17 @@ fn handle_err(result: parity_db::Result) -> T { } } -/// Wrap RocksDb database into a trait object that implements `sp_database::Database` -pub fn open(path: &std::path::Path) -> parity_db::Result>> { +/// Wrap parity-db database into a trait object that implements `sp_database::Database` +pub fn open(path: &std::path::Path, db_type: DatabaseType) + -> parity_db::Result>> +{ let mut config = parity_db::Options::with_columns(path, NUM_COLUMNS as u8); - let mut state_col = &mut config.columns[columns::STATE as usize]; - state_col.ref_counted = true; - state_col.preimage = true; - state_col.uniform = true; + if db_type == DatabaseType::Full { + let mut state_col = &mut config.columns[columns::STATE as usize]; + state_col.ref_counted = true; + state_col.preimage = true; + state_col.uniform = true; + } let db = parity_db::Db::open(&config)?; Ok(std::sync::Arc::new(DbAdapter(db))) } diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 434b301ed6240e94539cbf5d3c06265231639039..0b4b6d4f88ef507bb4da1c7faa5725bcd2550b20 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -1024,7 +1024,7 @@ mod tests { #[test] fn simple_fork() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let root_parent = H256::random(); let key = H256::random()[..].to_vec(); @@ -1245,7 +1245,7 @@ mod tests { #[test] fn fix_storage_mismatch_issue() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let root_parent = H256::random(); let key = H256::random()[..].to_vec(); diff 
--git a/client/db/src/subdb.rs b/client/db/src/subdb.rs deleted file mode 100644 index 2f72632b045622fcd035e50f7560c2c4b0fe753c..0000000000000000000000000000000000000000 --- a/client/db/src/subdb.rs +++ /dev/null @@ -1,88 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . -/// A `Database` adapter for subdb. - -use sp_database::{self, ColumnId}; -use parking_lot::RwLock; -use blake2_rfc::blake2b::blake2b; -use codec::Encode; -use subdb::{Database, KeyType}; - -/// A database hidden behind an RwLock, so that it implements Send + Sync. -/// -/// Construct by creating a `Database` and then using `.into()`. -pub struct DbAdapter(RwLock>); - -/// Wrap RocksDb database into a trait object that implements `sp_database::Database` -pub fn open( - path: &std::path::Path, - _num_columns: u32, -) -> Result>, subdb::Error> { - let db = subdb::Options::from_path(path.into()).open()?; - Ok(std::sync::Arc::new(DbAdapter(RwLock::new(db)))) -} - -impl sp_database::Database for DbAdapter { - fn get(&self, col: ColumnId, key: &[u8]) -> Option> { - let mut hash = H::default(); - (col, key).using_encoded(|d| - hash.as_mut().copy_from_slice(blake2b(32, &[], d).as_bytes()) - ); - self.0.read().get(&hash) - } - - fn with_get(&self, col: ColumnId, key: &[u8], f: &mut dyn FnMut(&[u8])) { - let mut hash = H::default(); - (col, key).using_encoded(|d| - hash.as_mut().copy_from_slice(blake2b(32, &[], d).as_bytes()) - ); - let _ = self.0.read().get_ref(&hash).map(|d| f(d.as_ref())); - } - - fn set(&self, col: ColumnId, key: &[u8], value: &[u8]) { - let mut hash = H::default(); - (col, key).using_encoded(|d| - hash.as_mut().copy_from_slice(blake2b(32, &[], d).as_bytes()) - ); - self.0.write().insert(&value, &hash); - } - - fn remove(&self, col: ColumnId, key: &[u8]) { - let mut hash = H::default(); - (col, key).using_encoded(|d| - hash.as_mut().copy_from_slice(blake2b(32, &[], d).as_bytes()) - ); - let _ = self.0.write().remove(&hash); - } - - fn lookup(&self, hash: &H) -> Option> { - self.0.read().get(hash) - } - - fn with_lookup(&self, hash: &H, f: &mut dyn FnMut(&[u8])) { - let _ = self.0.read().get_ref(hash).map(|d| f(d.as_ref())); - } - - fn store(&self, hash: &H, preimage: &[u8]) { - self.0.write().insert(preimage, hash); - } - - fn release(&self, hash: &H) { - let _ = self.0.write().remove(hash); - } -} diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index 168ab9bbb71f633706daee5c4e8b47f8088b1f35..e999469c18ff0c96f40580f0ed1192aae5a5268a 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -212,11 +212,12 @@ pub fn open_database( config: &DatabaseSettings, db_type: DatabaseType, ) -> sp_blockchain::Result>> { - let db_open_error = |feat| Err( + #[allow(unused)] + fn db_open_error(feat: &'static str) -> sp_blockchain::Error { 
sp_blockchain::Error::Backend( format!("`{}` feature not enabled, database can not be opened", feat), - ), - ); + ) + } let db: Arc> = match &config.source { #[cfg(any(feature = "with-kvdb-rocksdb", test))] @@ -226,56 +227,62 @@ pub fn open_database( // and now open database assuming that it has the latest version let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(NUM_COLUMNS); - let state_col_budget = (*cache_size as f64 * 0.9) as usize; - let other_col_budget = (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1); - let mut memory_budget = std::collections::HashMap::new(); let path = path.to_str() .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; - for i in 0..NUM_COLUMNS { - if i == crate::columns::STATE { - memory_budget.insert(i, state_col_budget); - } else { - memory_budget.insert(i, other_col_budget); + let mut memory_budget = std::collections::HashMap::new(); + match db_type { + DatabaseType::Full => { + let state_col_budget = (*cache_size as f64 * 0.9) as usize; + let other_col_budget = (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1); + + for i in 0..NUM_COLUMNS { + if i == crate::columns::STATE { + memory_budget.insert(i, state_col_budget); + } else { + memory_budget.insert(i, other_col_budget); + } + } + log::trace!( + target: "db", + "Open RocksDB database at {}, state column budget: {} MiB, others({}) column cache: {} MiB", + path, + state_col_budget, + NUM_COLUMNS, + other_col_budget, + ); + }, + DatabaseType::Light => { + let col_budget = cache_size / (NUM_COLUMNS as usize); + for i in 0..NUM_COLUMNS { + memory_budget.insert(i, col_budget); + } + log::trace!( + target: "db", + "Open RocksDB light database at {}, column cache: {} MiB", + path, + col_budget, + ); } } - db_config.memory_budget = memory_budget; - log::trace!( - target: "db", - "Open RocksDB database at {}, state column budget: {} MiB, others({}) column cache: {} MiB", - path, - state_col_budget, - NUM_COLUMNS, - other_col_budget, - ); - let db = kvdb_rocksdb::Database::open(&db_config, &path) .map_err(|err| sp_blockchain::Error::Backend(format!("{}", err)))?; sp_database::as_database(db) }, #[cfg(not(any(feature = "with-kvdb-rocksdb", test)))] DatabaseSettingsSrc::RocksDb { .. } => { - return db_open_error("with-kvdb-rocksdb"); - }, - #[cfg(feature = "with-subdb")] - DatabaseSettingsSrc::SubDb { path } => { - crate::subdb::open(&path, NUM_COLUMNS) - .map_err(|e| sp_blockchain::Error::Backend(format!("{:?}", e)))? - }, - #[cfg(not(feature = "with-subdb"))] - DatabaseSettingsSrc::SubDb { .. } => { - return db_open_error("with-subdb"); + return Err(db_open_error("with-kvdb-rocksdb")); }, #[cfg(feature = "with-parity-db")] DatabaseSettingsSrc::ParityDb { path } => { - crate::parity_db::open(&path) + crate::parity_db::open(&path, db_type) .map_err(|e| sp_blockchain::Error::Backend(format!("{:?}", e)))? }, #[cfg(not(feature = "with-parity-db"))] DatabaseSettingsSrc::ParityDb { .. 
} => { - return db_open_error("with-parity-db"); + return Err(db_open_error("with-parity-db")) }, DatabaseSettingsSrc::Custom(db) => db.clone(), }; diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index f59c89a9d70e379559f05b149a02e8c1e83d7bad..3220ae7161b7aba31165050992cf532a005e7b9e 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "A crate that provides means of executing/dispatching calls into the runtime." documentation = "https://docs.rs/sc-executor" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,38 +16,39 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" codec = { package = "parity-scale-codec", version = "1.3.4" } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-trie = { version = "2.0.0-rc5", path = "../../primitives/trie" } -sp-serializer = { version = "2.0.0-rc5", path = "../../primitives/serializer" } -sp-version = { version = "2.0.0-rc5", path = "../../primitives/version" } -sp-panic-handler = { version = "2.0.0-rc5", path = "../../primitives/panic-handler" } +sp-io = { version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-trie = { version = "2.0.0", path = "../../primitives/trie" } +sp-serializer = { version = "2.0.0", path = "../../primitives/serializer" } +sp-version = { version = "2.0.0", path = "../../primitives/version" } +sp-panic-handler = { version = "2.0.0", path = "../../primitives/panic-handler" } wasmi = "0.6.2" parity-wasm = "0.41.0" lazy_static = "1.4.0" -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } -sp-wasm-interface = { version = "2.0.0-rc5", path = "../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-rc5", path = "../../primitives/runtime-interface" } -sp-externalities = { version = "0.8.0-rc5", path = "../../primitives/externalities" } -sc-executor-common = { version = "0.8.0-rc5", path = "common" } -sc-executor-wasmi = { version = "0.8.0-rc5", path = "wasmi" } -sc-executor-wasmtime = { version = "0.8.0-rc5", path = "wasmtime", optional = true } +sp-api = { version = "2.0.0", path = "../../primitives/api" } +sp-wasm-interface = { version = "2.0.0", path = "../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0", path = "../../primitives/runtime-interface" } +sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } +sc-executor-common = { version = "0.8.0", path = "common" } +sc-executor-wasmi = { version = "0.8.0", path = "wasmi" } +sc-executor-wasmtime = { version = "0.8.0", path = "wasmtime", optional = true } parking_lot = "0.10.0" log = "0.4.8" libsecp256k1 = "0.3.4" [dev-dependencies] assert_matches = "1.3.0" -wabt = "0.9.2" -hex-literal = "0.2.1" -sc-runtime-test = { version = "2.0.0-rc5", path = "runtime-test" } -substrate-test-runtime = { version = "2.0.0-rc5", path = "../../test-utils/runtime" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } +wat = "1.0" +hex-literal = "0.3.1" +sc-runtime-test = { version = "2.0.0", path = 
"runtime-test" } +substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } +sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } test-case = "0.3.3" -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0-rc5", path = "../../primitives/tracing" } -sc-tracing = { version = "2.0.0-rc5", path = "../tracing" } -tracing = "0.1.18" +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } +sc-tracing = { version = "2.0.0", path = "../tracing" } +tracing = "0.1.19" +tracing-subscriber = "0.2.10" [features] default = [ "std" ] diff --git a/client/executor/README.md b/client/executor/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ab7b3d45206f9fc4ca4e0323dcc5b27bbf943226 --- /dev/null +++ b/client/executor/README.md @@ -0,0 +1,13 @@ +A crate that provides means of executing/dispatching calls into the runtime. + +There are a few responsibilities of this crate at the moment: + +- It provides an implementation of a common entrypoint for calling into the runtime, both +wasm and compiled. +- It defines the environment for the wasm execution, namely the host functions that are to be +provided into the wasm runtime module. +- It also provides the required infrastructure for executing the current wasm runtime (specified +by the current value of `:code` in the provided externalities), i.e. interfacing with +wasm engine used, instance cache. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index 2189d89b12d711eb04d5e58d48bd579639e12b8a..64ed23598f47c6a955ae6d9b084217896a7b3d98 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-common" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "A set of common definitions that are needed for defining execution engines." 
documentation = "https://docs.rs/sc-executor-common/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,11 +19,11 @@ derive_more = "0.99.2" parity-wasm = "0.41.0" codec = { package = "parity-scale-codec", version = "1.3.4" } wasmi = "0.6.2" -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0-rc5", path = "../../../primitives/allocator" } -sp-wasm-interface = { version = "2.0.0-rc5", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-rc5", path = "../../../primitives/runtime-interface" } -sp-serializer = { version = "2.0.0-rc5", path = "../../../primitives/serializer" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-allocator = { version = "2.0.0", path = "../../../primitives/allocator" } +sp-wasm-interface = { version = "2.0.0", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0", path = "../../../primitives/runtime-interface" } +sp-serializer = { version = "2.0.0", path = "../../../primitives/serializer" } [features] default = [] diff --git a/client/executor/common/README.md b/client/executor/common/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0c0d3bf08bcb20519013a428ee4daf9ce6c1a622 --- /dev/null +++ b/client/executor/common/README.md @@ -0,0 +1,3 @@ +A set of common definitions that are needed for defining execution engines. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index 9645dd90694b691230cdff6944768e6d17550cef..4bd90176f5ecd49c75d9a02d6df99e1fa102de60 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-runtime-test" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -13,12 +13,12 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/io" } -sp-sandbox = { version = "0.8.0-rc5", default-features = false, path = "../../../primitives/sandbox" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } -sp-allocator = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/allocator" } +sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../../primitives/io" } +sp-sandbox = { version = "0.8.0", default-features = false, path = "../../../primitives/sandbox" } +sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-allocator = { version = "2.0.0", default-features = false, path = "../../../primitives/allocator" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } @@ -29,5 +29,7 @@ std = [ "sp-io/std", "sp-sandbox/std", "sp-std/std", 
+ "sp-core/std", + "sp-runtime/std", "sp-allocator/std", ] diff --git a/client/executor/runtime-test/build.rs b/client/executor/runtime-test/build.rs index 1ed5aa44bc5c4673ad031190e2f36715907a4e00..cf4fca01acd9f7eb75d76445c69c82fea401ed06 100644 --- a/client/executor/runtime-test/build.rs +++ b/client/executor/runtime-test/build.rs @@ -17,10 +17,21 @@ use wasm_builder_runner::WasmBuilder; fn main() { + // regular build WasmBuilder::new() .with_current_project() .with_wasm_builder_from_crates_or_path("2.0.0", "../../../utils/wasm-builder") .export_heap_base() .import_memory() - .build() + .build(); + + // and building with tracing activated + WasmBuilder::new() + .with_current_project() + .with_wasm_builder_from_crates_or_path("2.0.0", "../../../utils/wasm-builder") + .export_heap_base() + .import_memory() + .set_file_name("wasm_binary_with_tracing.rs") + .append_to_rust_flags("--cfg feature=\\\"with-tracing\\\"") + .build(); } diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index a80ee1d6ba40f7db952973da782cc5a504c3bc2f..04397afd776d7b54c377a163c0868d6d6244b8b0 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -254,11 +254,11 @@ sp_core::wasm_export_functions! { } fn test_enter_span() -> u64 { - wasm_tracing::enter_span("integration_test_span_target", "integration_test_span_name") + wasm_tracing::enter_span(Default::default()) } fn test_exit_span(span_id: u64) { - wasm_tracing::exit_span(span_id) + wasm_tracing::exit(span_id) } fn returns_mutable_static() -> u64 { diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index a9ac0d0f30c237510ab9b0a16595ff1399b65945..3ff676fdbe61f3822306c658cde3789b63101957 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -30,6 +30,7 @@ use test_case::test_case; use sp_trie::{TrieConfiguration, trie_types::Layout}; use sp_wasm_interface::HostFunctions as _; use sp_runtime::traits::BlakeTwo256; +use tracing_subscriber::layer::SubscriberExt; use crate::WasmExecutionMethod; @@ -678,57 +679,20 @@ fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { let handler = TestTraceHandler(traces.clone()); // Create subscriber with wasm_tracing disabled - let test_subscriber = sc_tracing::ProfilingSubscriber::new_with_handler( - Box::new(handler), "integration_test_span_target"); + let test_subscriber = tracing_subscriber::fmt().finish().with( + sc_tracing::ProfilingLayer::new_with_handler( + Box::new(handler), "default" + ) + ); let _guard = tracing::subscriber::set_default(test_subscriber); let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - // Test tracing disabled - assert!(!sp_tracing::wasm_tracing_enabled()); - - let span_id = call_in_wasm( - "test_enter_span", - &[], - wasm_method, - &mut ext, - ).unwrap(); - - assert_eq!( - 0u64.encode(), - span_id - ); - // Repeat to check span id always 0 when deactivated - let span_id = call_in_wasm( - "test_enter_span", - &[], - wasm_method, - &mut ext, - ).unwrap(); - - assert_eq!( - 0u64.encode(), - span_id - ); - - call_in_wasm( - "test_exit_span", - &span_id.encode(), - wasm_method, - &mut ext, - ).unwrap(); - // Check span has not been recorded - let len = traces.lock().unwrap().len(); - assert_eq!(len, 0); - - // Test tracing enabled - sp_tracing::set_wasm_tracing(true); - let span_id = call_in_wasm( "test_enter_span", - &[], + Default::default(), wasm_method, &mut ext, ).unwrap(); @@ 
-752,8 +716,7 @@ fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { let span_datum = traces.lock().unwrap().pop().unwrap(); let values = span_datum.values; - assert_eq!(span_datum.target, "integration_test_span_target"); - assert_eq!(span_datum.name, "integration_test_span_name"); + assert_eq!(span_datum.target, "default"); + assert_eq!(span_datum.name, ""); assert_eq!(values.bool_values.get("wasm").unwrap(), &true); - assert_eq!(values.bool_values.get("is_valid_trace").unwrap(), &true); } diff --git a/client/executor/src/integration_tests/sandbox.rs b/client/executor/src/integration_tests/sandbox.rs index f84e446b416c06c2f423d71103dbc1597ec85138..447e395c2fb084aa0644504138987a673c40614d 100644 --- a/client/executor/src/integration_tests/sandbox.rs +++ b/client/executor/src/integration_tests/sandbox.rs @@ -21,7 +21,6 @@ use crate::WasmExecutionMethod; use codec::Encode; use test_case::test_case; -use wabt; #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] @@ -29,7 +28,7 @@ fn sandbox_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wat::parse_str(r#" (module (import "env" "assert" (func $assert (param i32))) (import "env" "inc_counter" (func $inc_counter (param i32) (result i32))) @@ -67,7 +66,7 @@ fn sandbox_trap(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wat::parse_str(r#" (module (import "env" "assert" (func $assert (param i32))) (func (export "call") @@ -94,7 +93,7 @@ fn start_called(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wat::parse_str(r#" (module (import "env" "assert" (func $assert (param i32))) (import "env" "inc_counter" (func $inc_counter (param i32) (result i32))) @@ -138,7 +137,7 @@ fn invoke_args(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wat::parse_str(r#" (module (import "env" "assert" (func $assert (param i32))) @@ -178,7 +177,7 @@ fn return_val(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wat::parse_str(r#" (module (func (export "call") (param $x i32) (result i32) (i32.add @@ -206,7 +205,7 @@ fn unlinkable_module(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wat::parse_str(r#" (module (import "env" "non-existent" (func)) @@ -252,7 +251,7 @@ fn start_fn_ok(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wat::parse_str(r#" (module (func (export "call") ) @@ -281,7 +280,7 @@ fn start_fn_traps(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wat::parse_str(r#" (module (func (export "call") ) @@ -311,7 +310,7 @@ fn get_global_val_works(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wat::parse_str(r#" (module (global (export "test_global") i64 (i64.const 500)) 
) diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml index 74456f06671d33d779d83fd7510001ba342001a5..bf174bca2d4662ed394171385ab6ff42772dd6fa 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-wasmi" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "This crate provides an implementation of `WasmRuntime` that is baked by wasmi." documentation = "https://docs.rs/sc-executor-wasmi" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,8 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" wasmi = "0.6.2" codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-executor-common = { version = "0.8.0-rc5", path = "../common" } -sp-wasm-interface = { version = "2.0.0-rc5", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-rc5", path = "../../../primitives/runtime-interface" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0-rc5", path = "../../../primitives/allocator" } +sc-executor-common = { version = "0.8.0", path = "../common" } +sp-wasm-interface = { version = "2.0.0", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0", path = "../../../primitives/runtime-interface" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-allocator = { version = "2.0.0", path = "../../../primitives/allocator" } diff --git a/client/executor/wasmi/README.md b/client/executor/wasmi/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ad613aa1245e3378704e04f5f6a70070b6dc9ec1 --- /dev/null +++ b/client/executor/wasmi/README.md @@ -0,0 +1,3 @@ +This crate provides an implementation of `WasmModule` that is backed by wasmi. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 6eea4e6b14ab14030e79e15f6661074a9c982030..7a8aa1ff458f358590ef0c58b8c15600469b6379 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "sc-executor-wasmtime" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Defines a `WasmRuntime` that uses the Wasmtime JIT to execute."
+readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,11 +17,11 @@ log = "0.4.8" scoped-tls = "1.0" parity-wasm = "0.41.0" codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-executor-common = { version = "0.8.0-rc5", path = "../common" } -sp-wasm-interface = { version = "2.0.0-rc5", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-rc5", path = "../../../primitives/runtime-interface" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0-rc5", path = "../../../primitives/allocator" } +sc-executor-common = { version = "0.8.0", path = "../common" } +sp-wasm-interface = { version = "2.0.0", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0", path = "../../../primitives/runtime-interface" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-allocator = { version = "2.0.0", path = "../../../primitives/allocator" } wasmtime = "0.19" pwasm-utils = "0.14.0" diff --git a/client/executor/wasmtime/README.md b/client/executor/wasmtime/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3e9ac0bddbdc17539d12893553789983e38ab05f --- /dev/null +++ b/client/executor/wasmtime/README.md @@ -0,0 +1 @@ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 7cd3548a7628cb3f84004af7e211ee5e994f06b9..fcefa2a04162eb208033ee0a9129024bd932fe48 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-finality-grandpa" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Integration of the GRANDPA finality gadget into substrate." 
documentation = "https://docs.rs/sc-finality-grandpa" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,46 +16,46 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -fork-tree = { version = "2.0.0-rc5", path = "../../utils/fork-tree" } +fork-tree = { version = "2.0.0", path = "../../utils/fork-tree" } futures = "0.3.4" futures-timer = "3.0.1" log = "0.4.8" parking_lot = "0.10.0" rand = "0.7.2" parity-scale-codec = { version = "1.3.4", features = ["derive"] } -sp-application-crypto = { version = "2.0.0-rc5", path = "../../primitives/application-crypto" } -sp-arithmetic = { version = "2.0.0-rc5", path = "../../primitives/arithmetic" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -sc-consensus = { version = "0.8.0-rc5", path = "../../client/consensus/common" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } -sc-telemetry = { version = "2.0.0-rc5", path = "../telemetry" } -sc-keystore = { version = "2.0.0-rc5", path = "../keystore" } +sp-application-crypto = { version = "2.0.0", path = "../../primitives/application-crypto" } +sp-arithmetic = { version = "2.0.0", path = "../../primitives/arithmetic" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0", path = "../../primitives/utils" } +sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } +sc-consensus = { version = "0.8.0", path = "../../client/consensus/common" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-api = { version = "2.0.0", path = "../../primitives/api" } +sc-telemetry = { version = "2.0.0", path = "../telemetry" } +sc-keystore = { version = "2.0.0", path = "../keystore" } serde_json = "1.0.41" -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-inherents = { version = "2.0.0-rc5", path = "../../primitives/inherents" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sc-network = { version = "0.8.0-rc5", path = "../network" } -sc-network-gossip = { version = "0.8.0-rc5", path = "../network-gossip" } -sp-finality-tracker = { version = "2.0.0-rc5", path = "../../primitives/finality-tracker" } -sp-finality-grandpa = { version = "2.0.0-rc5", path = "../../primitives/finality-grandpa" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc5"} -sc-block-builder = { version = "0.8.0-rc5", path = "../block-builder" } +sc-client-api = { version = "2.0.0", path = "../api" } +sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } +sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sc-network = { version = "0.8.0", path = "../network" } +sc-network-gossip = { version = "0.8.0", path = "../network-gossip" } +sp-finality-tracker = { version = "2.0.0", path = "../../primitives/finality-tracker" } +sp-finality-grandpa = { version = "2.0.0", path = "../../primitives/finality-grandpa" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} +sc-block-builder = { version = "0.8.0", path = "../block-builder" } finality-grandpa = { version = "0.12.3", features = ["derive-codec"] } 
pin-project = "0.4.6" [dev-dependencies] assert_matches = "1.3.0" finality-grandpa = { version = "0.12.3", features = ["derive-codec", "test-helpers"] } -sc-network = { version = "0.8.0-rc5", path = "../network" } -sc-network-test = { version = "0.8.0-rc5", path = "../network/test" } -sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } -sp-consensus-babe = { version = "0.8.0-rc5", path = "../../primitives/consensus/babe" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } -env_logger = "0.7.0" +sc-network = { version = "0.8.0", path = "../network" } +sc-network-test = { version = "0.8.0", path = "../network/test" } +sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +sp-consensus-babe = { version = "0.8.0", path = "../../primitives/consensus/babe" } +sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } +sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } tokio = { version = "0.2", features = ["rt-core"] } tempfile = "3.1.0" -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } +sp-api = { version = "2.0.0", path = "../../primitives/api" } diff --git a/client/finality-grandpa/README.md b/client/finality-grandpa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..64a7e70bc6a52631016bba71343b38d3e0f9f005 --- /dev/null +++ b/client/finality-grandpa/README.md @@ -0,0 +1,39 @@ +Integration of the GRANDPA finality gadget into substrate. + +This crate is unstable and the API and usage may change. + +This crate provides a long-running future that produces finality notifications. + +# Usage + +First, create a block-import wrapper with the `block_import` function. The +GRANDPA worker needs to be linked together with this block import object, so +a `LinkHalf` is returned as well. All blocks imported (from network or +consensus or otherwise) must pass through this wrapper, otherwise consensus +is likely to break in unexpected ways. + +Next, use the `LinkHalf` and a local configuration to `run_grandpa_voter`. +This requires a `Network` implementation. The returned future should be +driven to completion and will finalize blocks in the background. + +# Changing authority sets + +The rough idea behind changing authority sets in GRANDPA is that at some point, +we obtain agreement for some maximum block height that the current set can +finalize, and once a block with that height is finalized the next set will +pick up finalization from there. + +Technically speaking, this would be implemented as a voting rule which says, +"if there is a signal for a change in N blocks in block B, only vote on +chains with length NUM(B) + N if they contain B". This conditional-inclusion +logic is complex to compute because it requires looking arbitrarily far +back in the chain. + +Instead, we keep track of a list of all signals we've seen so far (across +all forks), sorted ascending by the block number they would be applied at. +We never vote on chains with number higher than the earliest handoff block +number (this is num(signal) + N). When finalizing a block, we either apply +or prune any signaled changes based on whether the signaling block is +included in the newly-finalized chain. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/finality-grandpa/rpc/Cargo.toml b/client/finality-grandpa/rpc/Cargo.toml index ca405eaec9dcbe068b54f885a844103b394c54a2..c0c2ea8b27d88a88599b83cd67f1d98b80f68890 100644 --- a/client/finality-grandpa/rpc/Cargo.toml +++ b/client/finality-grandpa/rpc/Cargo.toml @@ -1,36 +1,39 @@ [package] name = "sc-finality-grandpa-rpc" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "RPC extensions for the GRANDPA finality gadget" repository = "https://github.com/paritytech/substrate/" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +readme = "README.md" [dependencies] -sc-rpc = { version = "2.0.0-rc5", path = "../../rpc" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sc-finality-grandpa = { version = "0.8.0-rc5", path = "../" } +sc-finality-grandpa = { version = "0.8.0", path = "../" } +sc-rpc = { version = "2.0.0", path = "../../rpc" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } finality-grandpa = { version = "0.12.3", features = ["derive-codec"] } -jsonrpc-core = "14.2.0" -jsonrpc-core-client = "14.2.0" -jsonrpc-derive = "14.2.1" -jsonrpc-pubsub = "14.2.0" +jsonrpc-core = "15.0.0" +jsonrpc-core-client = "15.0.0" +jsonrpc-derive = "15.0.0" +jsonrpc-pubsub = "15.0.0" futures = { version = "0.3.4", features = ["compat"] } serde = { version = "1.0.105", features = ["derive"] } serde_json = "1.0.50" log = "0.4.8" derive_more = "0.99.2" parity-scale-codec = { version = "1.3.0", features = ["derive"] } +sc-client-api = { version = "2.0.0", path = "../../api" } [dev-dependencies] -sc-block-builder = { version = "0.8.0-rc5", path = "../../block-builder" } -sc-network-test = { version = "0.8.0-rc5", path = "../../network/test" } -sc-rpc = { version = "2.0.0-rc5", path = "../../rpc", features = ["test-helpers"] } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-finality-grandpa = { version = "2.0.0-rc5", path = "../../../primitives/finality-grandpa" } -sp-keyring = { version = "2.0.0-rc5", path = "../../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } +sc-block-builder = { version = "0.8.0", path = "../../block-builder" } +sc-network-test = { version = "0.8.0", path = "../../network/test" } +sc-rpc = { version = "2.0.0", path = "../../rpc", features = ["test-helpers"] } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-finality-grandpa = { version = "2.0.0", path = "../../../primitives/finality-grandpa" } +sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } lazy_static = "1.4" diff --git a/client/finality-grandpa/rpc/README.md b/client/finality-grandpa/rpc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0007f55dbd4dbfa5fe0b10b0cb9e0714def30b1a --- /dev/null +++ b/client/finality-grandpa/rpc/README.md @@ 
-0,0 +1,3 @@ +RPC API for GRANDPA. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/finality-grandpa/rpc/src/error.rs b/client/finality-grandpa/rpc/src/error.rs index bfd0596fdf3205d3eda8003a3a0f109b83fe836f..6464acbe10ea076ed637d603fbf48692509fb8e6 100644 --- a/client/finality-grandpa/rpc/src/error.rs +++ b/client/finality-grandpa/rpc/src/error.rs @@ -16,8 +16,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::NOT_READY_ERROR_CODE; - #[derive(derive_more::Display, derive_more::From)] /// Top-level error type for the RPC handler pub enum Error { @@ -30,13 +28,41 @@ pub enum Error { /// GRANDPA reports voter state with round id or weights larger than 32-bits. #[display(fmt = "GRANDPA reports voter state as unreasonably large")] VoterStateReportsUnreasonablyLargeNumbers, + /// GRANDPA prove finality failed. + #[display(fmt = "GRANDPA prove finality rpc failed: {}", _0)] + ProveFinalityFailed(sp_blockchain::Error), +} + +/// The error codes returned by jsonrpc. +pub enum ErrorCode { + /// Returned when Grandpa RPC endpoint is not ready. + NotReady = 1, + /// Authority set ID is larger than 32-bits. + AuthoritySetTooLarge, + /// Voter state with round id or weights larger than 32-bits. + VoterStateTooLarge, + /// Failed to prove finality. + ProveFinality, +} + +impl From for ErrorCode { + fn from(error: Error) -> Self { + match error { + Error::EndpointNotReady => ErrorCode::NotReady, + Error::AuthoritySetIdReportedAsUnreasonablyLarge => ErrorCode::AuthoritySetTooLarge, + Error::VoterStateReportsUnreasonablyLargeNumbers => ErrorCode::VoterStateTooLarge, + Error::ProveFinalityFailed(_) => ErrorCode::ProveFinality, + } + } } impl From for jsonrpc_core::Error { fn from(error: Error) -> Self { + let message = format!("{}", error); + let code = ErrorCode::from(error); jsonrpc_core::Error { - message: format!("{}", error), - code: jsonrpc_core::ErrorCode::ServerError(NOT_READY_ERROR_CODE), + message, + code: jsonrpc_core::ErrorCode::ServerError(code as i64), data: None, } } diff --git a/client/finality-grandpa/rpc/src/finality.rs b/client/finality-grandpa/rpc/src/finality.rs new file mode 100644 index 0000000000000000000000000000000000000000..1f288b86a0e469efefcbc7b826f23c527a8344fc --- /dev/null +++ b/client/finality-grandpa/rpc/src/finality.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use serde::{Serialize, Deserialize}; + +use sc_finality_grandpa::FinalityProofProvider; +use sp_runtime::traits::{Block as BlockT, NumberFor}; + +#[derive(Serialize, Deserialize)] +pub struct EncodedFinalityProofs(pub sp_core::Bytes); + +/// Local trait mainly to allow mocking in tests. 
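A note on the `ErrorCode` enum introduced in the error.rs hunk above: only `NotReady` carries an explicit discriminant, so the remaining variants count upwards and the values handed to `jsonrpc_core::ErrorCode::ServerError(code as i64)` come out as 1 through 4. A small self-contained check of that layout (the enum below is a standalone mock with the names copied from the diff):

```rust
// Mirrors the discriminant layout of the `ErrorCode` enum introduced above:
// only the first variant is given explicitly, the rest count upwards.
enum ErrorCode {
    NotReady = 1,
    AuthoritySetTooLarge,
    VoterStateTooLarge,
    ProveFinality,
}

fn main() {
    // These are the values that end up in ServerError(code as i64).
    assert_eq!(ErrorCode::NotReady as i64, 1);
    assert_eq!(ErrorCode::AuthoritySetTooLarge as i64, 2);
    assert_eq!(ErrorCode::VoterStateTooLarge as i64, 3);
    assert_eq!(ErrorCode::ProveFinality as i64, 4);
}
```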
+pub trait RpcFinalityProofProvider { + /// Return finality proofs for the given authorities set id, if it is provided, otherwise the + /// current one will be used. + fn rpc_prove_finality( + &self, + begin: Block::Hash, + end: Block::Hash, + authorities_set_id: u64, + ) -> Result, sp_blockchain::Error>; +} + +impl RpcFinalityProofProvider for FinalityProofProvider +where + Block: BlockT, + NumberFor: finality_grandpa::BlockNumberOps, + B: sc_client_api::backend::Backend + Send + Sync + 'static, +{ + fn rpc_prove_finality( + &self, + begin: Block::Hash, + end: Block::Hash, + authorities_set_id: u64, + ) -> Result, sp_blockchain::Error> { + self.prove_finality(begin, end, authorities_set_id) + .map(|x| x.map(|y| EncodedFinalityProofs(y.into()))) + } +} diff --git a/client/finality-grandpa/rpc/src/lib.rs b/client/finality-grandpa/rpc/src/lib.rs index c00c95c5f776fd7c25ea66e7f1cf02002e6b1a2c..172473ad6518bc90e709f9bb24ebe98058fb89de 100644 --- a/client/finality-grandpa/rpc/src/lib.rs +++ b/client/finality-grandpa/rpc/src/lib.rs @@ -19,6 +19,7 @@ //! RPC API for GRANDPA. #![warn(missing_docs)] +use std::sync::Arc; use futures::{FutureExt, TryFutureExt, TryStreamExt, StreamExt}; use log::warn; use jsonrpc_derive::rpc; @@ -27,27 +28,27 @@ use jsonrpc_core::futures::{ sink::Sink as Sink01, stream::Stream as Stream01, future::Future as Future01, + future::Executor as Executor01, }; mod error; +mod finality; mod notification; mod report; use sc_finality_grandpa::GrandpaJustificationStream; use sp_runtime::traits::Block as BlockT; +use finality::{EncodedFinalityProofs, RpcFinalityProofProvider}; use report::{ReportAuthoritySet, ReportVoterState, ReportedRoundStates}; use notification::JustificationNotification; -/// Returned when Grandpa RPC endpoint is not ready. -pub const NOT_READY_ERROR_CODE: i64 = 1; - type FutureResult = Box + Send>; /// Provides RPC methods for interacting with GRANDPA. #[rpc] -pub trait GrandpaApi { +pub trait GrandpaApi { /// RPC Metadata type Metadata; @@ -80,39 +81,59 @@ pub trait GrandpaApi { metadata: Option, id: SubscriptionId ) -> jsonrpc_core::Result; + + /// Prove finality for the range (begin; end] hash. Returns None if there are no finalized blocks + /// unknown in the range. If no authorities set is provided, the current one will be attempted. + #[rpc(name = "grandpa_proveFinality")] + fn prove_finality( + &self, + begin: Hash, + end: Hash, + authorities_set_id: Option, + ) -> FutureResult>; } /// Implements the GrandpaApi RPC trait for interacting with GRANDPA. -pub struct GrandpaRpcHandler { +pub struct GrandpaRpcHandler { authority_set: AuthoritySet, voter_state: VoterState, justification_stream: GrandpaJustificationStream, manager: SubscriptionManager, + finality_proof_provider: Arc, } -impl GrandpaRpcHandler { +impl + GrandpaRpcHandler +{ /// Creates a new GrandpaRpcHandler instance. 
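The `grandpa_proveFinality` method declared above takes a begin hash, an end hash and an optional authority set id (falling back to the current set when omitted), and returns the SCALE-encoded proof as hex bytes, or null when no finalized block in the range is unknown. The request shape below is lifted from the handler test later in this diff; the hashes and set id are placeholders:

```rust
// Request/response shape only; the hashes and set id below are placeholders.
fn main() {
    let request = r#"{
        "jsonrpc": "2.0",
        "method": "grandpa_proveFinality",
        "params": [
            "0x0000000000000000000000000000000000000000000000000000000000000000",
            "0x0000000000000000000000000000000000000000000000000000000000000001",
            42
        ],
        "id": 1
    }"#;

    // A successful reply carries the SCALE-encoded proof as a hex `Bytes` string:
    //   {"jsonrpc":"2.0","result":"0x...","id":1}
    // and `result` is null when nothing in the range needs proving.
    println!("{}", request);
}
```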
- pub fn new( + pub fn new( authority_set: AuthoritySet, voter_state: VoterState, justification_stream: GrandpaJustificationStream, - manager: SubscriptionManager, - ) -> Self { + executor: E, + finality_proof_provider: Arc, + ) -> Self + where + E: Executor01 + Send>> + Send + Sync + 'static, + { + let manager = SubscriptionManager::new(Arc::new(executor)); Self { authority_set, voter_state, justification_stream, manager, + finality_proof_provider, } } } -impl GrandpaApi - for GrandpaRpcHandler +impl GrandpaApi + for GrandpaRpcHandler where VoterState: ReportVoterState + Send + Sync + 'static, AuthoritySet: ReportAuthoritySet + Send + Sync + 'static, Block: BlockT, + ProofProvider: RpcFinalityProofProvider + Send + Sync + 'static, { type Metadata = sc_rpc::Metadata; @@ -147,6 +168,30 @@ where ) -> jsonrpc_core::Result { Ok(self.manager.cancel(id)) } + + fn prove_finality( + &self, + begin: Block::Hash, + end: Block::Hash, + authorities_set_id: Option, + ) -> FutureResult> { + // If we are not provided a set_id, try with the current one. + let authorities_set_id = authorities_set_id + .unwrap_or_else(|| self.authority_set.get().0); + let result = self + .finality_proof_provider + .rpc_prove_finality(begin, end, authorities_set_id); + let future = async move { result }.boxed(); + Box::new( + future + .map_err(|e| { + warn!("Error proving finality: {}", e); + error::Error::ProveFinalityFailed(e) + }) + .map_err(jsonrpc_core::Error::from) + .compat() + ) + } } #[cfg(test)] @@ -155,16 +200,19 @@ mod tests { use std::{collections::HashSet, convert::TryInto, sync::Arc}; use jsonrpc_core::{Notification, Output, types::Params}; - use parity_scale_codec::Decode; + use parity_scale_codec::{Encode, Decode}; use sc_block_builder::BlockBuilder; - use sc_finality_grandpa::{report, AuthorityId, GrandpaJustificationSender, GrandpaJustification}; + use sc_finality_grandpa::{ + report, AuthorityId, GrandpaJustificationSender, GrandpaJustification, + FinalityProofFragment, + }; use sp_blockchain::HeaderBackend; use sp_consensus::RecordProof; use sp_core::crypto::Public; use sp_keyring::Ed25519Keyring; - use sp_runtime::traits::Header as HeaderT; + use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use substrate_test_runtime_client::{ - runtime::Block, + runtime::{Block, Header, H256}, DefaultTestClientBuilderExt, TestClientBuilderExt, TestClientBuilder, @@ -174,6 +222,10 @@ mod tests { struct TestVoterState; struct EmptyVoterState; + struct TestFinalityProofProvider { + finality_proofs: Vec>, + } + fn voters() -> HashSet { let voter_id_1 = AuthorityId::from_slice(&[1; 32]); let voter_id_2 = AuthorityId::from_slice(&[2; 32]); @@ -193,6 +245,31 @@ mod tests { } } + fn header(number: u64) -> Header { + let parent_hash = match number { + 0 => Default::default(), + _ => header(number - 1).hash(), + }; + Header::new( + number, + H256::from_low_u64_be(0), + H256::from_low_u64_be(0), + parent_hash, + Default::default(), + ) + } + + impl RpcFinalityProofProvider for TestFinalityProofProvider { + fn rpc_prove_finality( + &self, + _begin: Block::Hash, + _end: Block::Hash, + _authoritites_set_id: u64, + ) -> Result, sp_blockchain::Error> { + Ok(Some(EncodedFinalityProofs(self.finality_proofs.encode().into()))) + } + } + impl ReportVoterState for TestVoterState { fn get(&self) -> Option> { let voter_id_1 = AuthorityId::from_slice(&[1; 32]); @@ -230,15 +307,28 @@ mod tests { GrandpaJustificationSender, ) where VoterState: ReportVoterState + Send + Sync + 'static, + { + 
setup_io_handler_with_finality_proofs(voter_state, Default::default()) + } + + fn setup_io_handler_with_finality_proofs( + voter_state: VoterState, + finality_proofs: Vec>, + ) -> ( + jsonrpc_core::MetaIoHandler, + GrandpaJustificationSender, + ) where + VoterState: ReportVoterState + Send + Sync + 'static, { let (justification_sender, justification_stream) = GrandpaJustificationStream::channel(); - let manager = SubscriptionManager::new(Arc::new(sc_rpc::testing::TaskExecutor)); + let finality_proof_provider = Arc::new(TestFinalityProofProvider { finality_proofs }); let handler = GrandpaRpcHandler::new( TestAuthoritySet, voter_state, justification_stream, - manager, + sc_rpc::testing::TaskExecutor, + finality_proof_provider, ); let mut io = jsonrpc_core::MetaIoHandler::default(); @@ -406,7 +496,7 @@ mod tests { // Notify with a header and justification let justification = create_justification(); - let _ = justification_sender.notify(justification.clone()).unwrap(); + justification_sender.notify(|| Ok(justification.clone())).unwrap(); // Inspect what we received let recv = receiver.take(1).wait().flatten().collect::>(); @@ -418,7 +508,7 @@ mod tests { let recv_sub_id: String = serde_json::from_value(json_map["subscription"].take()).unwrap(); - let recv_justification: Vec = + let recv_justification: sp_core::Bytes = serde_json::from_value(json_map["result"].take()).unwrap(); let recv_justification: GrandpaJustification = Decode::decode(&mut &recv_justification[..]).unwrap(); @@ -427,4 +517,32 @@ mod tests { assert_eq!(recv_sub_id, sub_id); assert_eq!(recv_justification, justification); } + + #[test] + fn prove_finality_with_test_finality_proof_provider() { + let finality_proofs = vec![FinalityProofFragment { + block: header(42).hash(), + justification: create_justification().encode(), + unknown_headers: vec![header(2)], + authorities_proof: None, + }]; + let (io, _) = setup_io_handler_with_finality_proofs( + TestVoterState, + finality_proofs.clone(), + ); + + let request = "{\"jsonrpc\":\"2.0\",\"method\":\"grandpa_proveFinality\",\"params\":[\ + \"0x0000000000000000000000000000000000000000000000000000000000000000\",\ + \"0x0000000000000000000000000000000000000000000000000000000000000001\",\ + 42\ + ],\"id\":1}"; + + let meta = sc_rpc::Metadata::default(); + let resp = io.handle_request_sync(request, meta); + let mut resp: serde_json::Value = serde_json::from_str(&resp.unwrap()).unwrap(); + let result: sp_core::Bytes = serde_json::from_value(resp["result"].take()).unwrap(); + let fragments: Vec> = + Decode::decode(&mut &result[..]).unwrap(); + assert_eq!(fragments, finality_proofs); + } } diff --git a/client/finality-grandpa/rpc/src/notification.rs b/client/finality-grandpa/rpc/src/notification.rs index 831f4681549a7d0d6c50b52063d5d3c69bd5accc..fd03a622b21967c723bcdfb67a56bd70ffd5cdd3 100644 --- a/client/finality-grandpa/rpc/src/notification.rs +++ b/client/finality-grandpa/rpc/src/notification.rs @@ -23,10 +23,10 @@ use sc_finality_grandpa::GrandpaJustification; /// An encoded justification proving that the given header has been finalized #[derive(Clone, Serialize, Deserialize)] -pub struct JustificationNotification(Vec); +pub struct JustificationNotification(sp_core::Bytes); impl From> for JustificationNotification { fn from(notification: GrandpaJustification) -> Self { - JustificationNotification(notification.encode()) + JustificationNotification(notification.encode().into()) } } diff --git a/client/finality-grandpa/src/communication/mod.rs 
b/client/finality-grandpa/src/communication/mod.rs index a8bfb84416b8f453bfd19ae12fe693929bb5c71e..9509922cf2d3a6463e339702c9e2c2830f533631 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -69,7 +69,7 @@ mod periodic; pub(crate) mod tests; pub use sp_finality_grandpa::GRANDPA_ENGINE_ID; -pub const GRANDPA_PROTOCOL_NAME: &[u8] = b"/paritytech/grandpa/1"; +pub const GRANDPA_PROTOCOL_NAME: &'static str = "/paritytech/grandpa/1"; // cost scalars for reporting peers. mod cost { diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 273804f7a45080a84c2a3bc39c2c97dbffbeedb6..1a773acd6d0fbaadb645824f00f6b24f1c9ecd6e 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -61,7 +61,7 @@ impl sc_network_gossip::Network for TestNetwork { let _ = self.sender.unbounded_send(Event::WriteNotification(who, message)); } - fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, [u8]>) {} + fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, str>) {} fn announce(&self, block: Hash, _associated_data: Vec) { let _ = self.sender.unbounded_send(Event::Announce(block)); @@ -361,7 +361,7 @@ fn good_commit_leads_to_relay() { #[test] fn bad_commit_leads_to_report() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let private = [Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let public = make_ids(&private[..]); let voter_set = Arc::new(VoterSet::new(public.iter().cloned()).unwrap()); diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index a7a29fe0e8a65ec2f7a4014193ebb5ed0d789c64..9215dcb32351608e1321210e5e649a88abfcd09c 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -39,7 +39,7 @@ use sp_runtime::generic::BlockId; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, NumberFor, One, Zero, }; -use sc_telemetry::{telemetry, CONSENSUS_INFO}; +use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_INFO}; use crate::{ CommandOrError, Commit, Config, Error, Precommit, Prevote, @@ -59,7 +59,7 @@ use sp_finality_grandpa::{ AuthorityId, AuthoritySignature, Equivocation, EquivocationProof, GrandpaApi, RoundNumber, SetId, }; -use prometheus_endpoint::{Gauge, U64, register, PrometheusError}; +use prometheus_endpoint::{register, Counter, Gauge, PrometheusError, U64}; type HistoricalVotes = finality_grandpa::HistoricalVotes< ::Hash, @@ -378,14 +378,32 @@ impl SharedVoterSetState { #[derive(Clone)] pub(crate) struct Metrics { finality_grandpa_round: Gauge, + finality_grandpa_prevotes: Counter, + finality_grandpa_precommits: Counter, } impl Metrics { - pub(crate) fn register(registry: &prometheus_endpoint::Registry) -> Result { + pub(crate) fn register( + registry: &prometheus_endpoint::Registry, + ) -> Result { Ok(Self { finality_grandpa_round: register( Gauge::new("finality_grandpa_round", "Highest completed GRANDPA round.")?, - registry + registry, + )?, + finality_grandpa_prevotes: register( + Counter::new( + "finality_grandpa_prevotes_total", + "Total number of GRANDPA prevotes cast locally.", + )?, + registry, + )?, + finality_grandpa_precommits: register( + Counter::new( + "finality_grandpa_precommits_total", + "Total number of GRANDPA precommits cast locally.", + )?, + registry, )?, }) } @@ -645,7 +663,8 @@ 
pub(crate) fn ancestry( client: &Arc, base: Block::Hash, block: Block::Hash, -) -> Result, GrandpaError> where +) -> Result, GrandpaError> +where Client: HeaderMetadata, { if base == block { return Err(GrandpaError::NotDescendent) } @@ -671,15 +690,14 @@ pub(crate) fn ancestry( Ok(tree_route.retracted().iter().skip(1).map(|e| e.hash).collect()) } -impl - voter::Environment> -for Environment +impl voter::Environment> + for Environment where Block: 'static, B: Backend, C: crate::ClientForGrandpa + 'static, C::Api: GrandpaApi, - N: NetworkT + 'static + Send + Sync, + N: NetworkT + 'static + Send + Sync, SC: SelectChain + 'static, VR: VotingRule, NumberFor: BlockNumberOps, @@ -804,9 +822,22 @@ where None => return Ok(()), }; + let report_prevote_metrics = |prevote: &Prevote| { + telemetry!(CONSENSUS_DEBUG; "afg.prevote_issued"; + "round" => round, + "target_number" => ?prevote.target_number, + "target_hash" => ?prevote.target_hash, + ); + + if let Some(metrics) = self.metrics.as_ref() { + metrics.finality_grandpa_prevotes.inc(); + } + }; + self.update_voter_set_state(|voter_set_state| { let (completed_rounds, current_rounds) = voter_set_state.with_current_round(round)?; - let current_round = current_rounds.get(&round) + let current_round = current_rounds + .get(&round) .expect("checked in with_current_round that key exists; qed."); if !current_round.can_prevote() { @@ -816,6 +847,9 @@ where return Ok(None); } + // report to telemetry and prometheus + report_prevote_metrics(&prevote); + let propose = current_round.propose(); let mut current_rounds = current_rounds.clone(); @@ -837,7 +871,11 @@ where Ok(()) } - fn precommitted(&self, round: RoundNumber, precommit: Precommit) -> Result<(), Self::Error> { + fn precommitted( + &self, + round: RoundNumber, + precommit: Precommit, + ) -> Result<(), Self::Error> { let local_id = crate::is_voter(&self.voters, self.config.keystore.as_ref()); let local_id = match local_id { @@ -845,9 +883,22 @@ where None => return Ok(()), }; + let report_precommit_metrics = |precommit: &Precommit| { + telemetry!(CONSENSUS_DEBUG; "afg.precommit_issued"; + "round" => round, + "target_number" => ?precommit.target_number, + "target_hash" => ?precommit.target_hash, + ); + + if let Some(metrics) = self.metrics.as_ref() { + metrics.finality_grandpa_precommits.inc(); + } + }; + self.update_voter_set_state(|voter_set_state| { let (completed_rounds, current_rounds) = voter_set_state.with_current_round(round)?; - let current_round = current_rounds.get(&round) + let current_round = current_rounds + .get(&round) .expect("checked in with_current_round that key exists; qed."); if !current_round.can_precommit() { @@ -857,13 +908,16 @@ where return Ok(None); } + // report to telemetry and prometheus + report_precommit_metrics(&precommit); + let propose = current_round.propose(); let prevote = match current_round { HasVoted::Yes(_, Vote::Prevote(_, prevote)) => prevote, _ => { let msg = "Voter precommitting before prevoting."; return Err(Error::Safety(msg.to_string())); - }, + } }; let mut current_rounds = current_rounds.clone(); @@ -1023,7 +1077,7 @@ where number, (round, commit).into(), false, - &self.justification_sender, + self.justification_sender.as_ref(), ) } @@ -1088,9 +1142,10 @@ pub(crate) fn finalize_block( number: NumberFor, justification_or_commit: JustificationOrCommit, initial_sync: bool, - justification_sender: &Option>, -) -> Result<(), CommandOrError>> where - Block: BlockT, + justification_sender: Option<&GrandpaJustificationSender>, +) -> Result<(), CommandOrError>> 
+where + Block: BlockT, BE: Backend, Client: crate::ClientForGrandpa, { @@ -1154,6 +1209,18 @@ pub(crate) fn finalize_block( } } + // send a justification notification if a sender exists and in case of error log it. + fn notify_justification( + justification_sender: Option<&GrandpaJustificationSender>, + justification: impl FnOnce() -> Result, Error>, + ) { + if let Some(sender) = justification_sender { + if let Err(err) = sender.notify(justification) { + warn!(target: "afg", "Error creating justification for subscriber: {:?}", err); + } + } + } + // NOTE: this code assumes that honest voters will never vote past a // transition block, thus we don't have to worry about the case where // we have a transition with `effective_block = N`, but we finalize @@ -1161,7 +1228,10 @@ pub(crate) fn finalize_block( // justifications for transition blocks which will be requested by // syncing clients. let justification = match justification_or_commit { - JustificationOrCommit::Justification(justification) => Some(justification), + JustificationOrCommit::Justification(justification) => { + notify_justification(justification_sender, || Ok(justification.clone())); + Some(justification.encode()) + }, JustificationOrCommit::Commit((round_number, commit)) => { let mut justification_required = // justification is always required when block that enacts new authorities @@ -1181,29 +1251,31 @@ pub(crate) fn finalize_block( } } + // NOTE: the code below is a bit more verbose because we + // really want to avoid creating a justification if it isn't + // needed (e.g. if there's no subscribers), and also to avoid + // creating it twice. depending on the vote tree for the round, + // creating a justification might require multiple fetches of + // headers from the database. + let justification = || GrandpaJustification::from_commit( + &client, + round_number, + commit, + ); + if justification_required { - let justification = GrandpaJustification::from_commit( - &client, - round_number, - commit, - )?; + let justification = justification()?; + notify_justification(justification_sender, || Ok(justification.clone())); - Some(justification) + Some(justification.encode()) } else { + notify_justification(justification_sender, justification); + None } }, }; - // Notify any registered listeners in case we have a justification - if let Some(sender) = justification_sender { - if let Some(ref justification) = justification { - let _ = sender.notify(justification.clone()); - } - } - - let justification = justification.map(|j| j.encode()); - debug!(target: "afg", "Finalizing blocks up to ({:?}, {})", number, hash); // ideally some handle to a synchronization oracle would be used diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 2ac9ec57f3df4a6d706514a18109addaabc2f9df..33dd69cc11d6e73c328b2ceb4d0f594a40ba1fae 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -180,7 +180,30 @@ impl FinalityProofProvider ) -> Arc { Arc::new(Self::new(backend, storage_and_proof_provider)) } +} +impl FinalityProofProvider + where + Block: BlockT, + NumberFor: BlockNumberOps, + B: Backend + Send + Sync + 'static, +{ + /// Prove finality for the range (begin; end] hash. Returns None if there are no finalized blocks + /// unknown in the range. 
+ pub fn prove_finality( + &self, + begin: Block::Hash, + end: Block::Hash, + authorities_set_id: u64, + ) -> Result>, ClientError> { + prove_finality::<_, _, GrandpaJustification>( + &*self.backend.blockchain(), + &*self.authority_provider, + authorities_set_id, + begin, + end, + ) + } } impl sc_network::config::FinalityProofProvider for FinalityProofProvider @@ -232,8 +255,8 @@ pub struct FinalityEffects { /// 1) the justification for the descendant block F; /// 2) headers sub-chain (B; F] if B != F; /// 3) proof of GRANDPA::authorities() if the set changes at block F. -#[derive(Debug, PartialEq, Encode, Decode)] -pub(crate) struct FinalityProofFragment { +#[derive(Debug, PartialEq, Encode, Decode, Clone)] +pub struct FinalityProofFragment { /// The hash of block F for which justification is provided. pub block: Header::Hash, /// Justification of the block F. diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index d5b0a650096ca08ca328f9d2b42c2b2787dcffad..04df95a3187e1bea91bea1c945394faaf6bccef6 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -619,7 +619,6 @@ where Client: crate::ClientForGrandpa, NumberFor: finality_grandpa::BlockNumberOps, { - /// Import a block justification and finalize the block. /// /// If `enacts_change` is set to true, then finalizing this block *must* @@ -653,7 +652,7 @@ where number, justification.into(), initial_sync, - &Some(self.justification_sender.clone()), + Some(&self.justification_sender), ); match result { diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index ab84591f9cdec7ffec60a62df473d9d8a8cb935f..a15130942c30fb927093565c847e2a1e3df19778 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -125,7 +125,7 @@ mod until_imported; mod voting_rule; pub use authorities::SharedAuthoritySet; -pub use finality_proof::{FinalityProofProvider, StorageAndProofProvider}; +pub use finality_proof::{FinalityProofFragment, FinalityProofProvider, StorageAndProofProvider}; pub use notification::{GrandpaJustificationSender, GrandpaJustificationStream}; pub use import::GrandpaBlockImport; pub use justification::GrandpaJustification; diff --git a/client/finality-grandpa/src/notification.rs b/client/finality-grandpa/src/notification.rs index 16f705f0eeb1fda4f881e39d4977c04862233886..8415583051902330a99f5c4cb723311045844ed8 100644 --- a/client/finality-grandpa/src/notification.rs +++ b/client/finality-grandpa/src/notification.rs @@ -20,9 +20,10 @@ use std::sync::Arc; use parking_lot::Mutex; use sp_runtime::traits::Block as BlockT; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use crate::justification::GrandpaJustification; +use crate::Error; // Stream of justifications returned when subscribing. type JustificationStream = TracingUnboundedReceiver>; @@ -54,10 +55,22 @@ impl GrandpaJustificationSender { /// Send out a notification to all subscribers that a new justification /// is available for a block. 
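The notification.rs hunk that follows changes `GrandpaJustificationSender::notify` to take a closure, so the potentially expensive justification is only built when at least one subscriber is listening; call sites become `justification_sender.notify(|| Ok(justification.clone()))`, as the updated RPC tests show. A self-contained sketch of that lazy-notify pattern (generic names, not the crate's types):

```rust
// Illustrative sketch of the pattern introduced below: the closure that builds
// the payload is only invoked if there is at least one subscriber.
struct LazySender<T> {
    subscribers: Vec<std::sync::mpsc::Sender<T>>,
}

impl<T: Clone> LazySender<T> {
    fn notify<E>(&mut self, build: impl FnOnce() -> Result<T, E>) -> Result<(), E> {
        if !self.subscribers.is_empty() {
            let payload = build()?; // costly work happens at most once
            self.subscribers.retain(|s| s.send(payload.clone()).is_ok());
        }
        Ok(())
    }
}

fn main() {
    let (tx, rx) = std::sync::mpsc::channel();
    let mut sender = LazySender { subscribers: vec![tx] };
    sender.notify::<()>(|| Ok("justification".to_string())).unwrap();
    assert_eq!(rx.recv().unwrap(), "justification");
}
```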
- pub fn notify(&self, notification: GrandpaJustification) -> Result<(), ()> { - self.subscribers.lock().retain(|n| { - !n.is_closed() && n.unbounded_send(notification.clone()).is_ok() - }); + pub fn notify( + &self, + justification: impl FnOnce() -> Result, Error>, + ) -> Result<(), Error> { + let mut subscribers = self.subscribers.lock(); + + // do an initial prune on closed subscriptions + subscribers.retain(|n| !n.is_closed()); + + // if there's no subscribers we avoid creating + // the justification which is a costly operation + if !subscribers.is_empty() { + let justification = justification()?; + subscribers.retain(|n| n.unbounded_send(justification.clone()).is_ok()); + } + Ok(()) } } diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 8fb536a369751b8bcbefa77ef827e76d706b33ad..6a9955aa86d81b0be763b683489ef0144c0c1055 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -74,11 +74,10 @@ fn grandpa_observer( last_finalized_number: NumberFor, commits: S, note_round: F, -) -> impl Future>>> where +) -> impl Future>>> +where NumberFor: BlockNumberOps, - S: Stream< - Item = Result, CommandOrError>>, - >, + S: Stream, CommandOrError>>>, F: Fn(u64), BE: Backend, Client: crate::ClientForGrandpa, @@ -130,7 +129,7 @@ fn grandpa_observer( finalized_number, (round, commit).into(), false, - &justification_sender, + justification_sender.as_ref(), ) { Ok(_) => {}, Err(e) => return future::err(e), diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index d2905e4da44537a76ff602078c5283eff6631437..7e5282fe3e9f0507f5fe1c58e7b966181abde8a4 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -23,7 +23,7 @@ use assert_matches::assert_matches; use environment::HasVoted; use sc_network_test::{ Block, BlockImportAdapter, Hash, PassThroughVerifier, Peer, PeersClient, PeersFullClient, - TestClient, TestNetFactory, + TestClient, TestNetFactory, FullPeerConfig, }; use sc_network::config::{ProtocolConfig, BoxFinalityProofRequestBuilder}; use parking_lot::Mutex; @@ -94,6 +94,15 @@ impl TestNetFactory for GrandpaTestNet { ProtocolConfig::default() } + fn add_full_peer(&mut self) { + self.add_full_peer_with_config(FullPeerConfig { + notifications_protocols: vec![ + (communication::GRANDPA_ENGINE_ID, communication::GRANDPA_PROTOCOL_NAME.into()) + ], + ..Default::default() + }) + } + fn make_verifier( &self, _client: PeersClient, @@ -408,7 +417,7 @@ fn add_forced_change( #[test] fn finalize_3_voters_no_observers() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); @@ -513,7 +522,7 @@ fn finalize_3_voters_1_full_observer() { #[test] fn transition_3_voters_twice_1_full_observer() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let peers_a = &[ Ed25519Keyring::Alice, Ed25519Keyring::Bob, @@ -783,7 +792,7 @@ fn sync_justifications_on_change_blocks() { #[test] fn finalizes_multiple_pending_changes_in_order() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; @@ -843,7 +852,7 @@ fn finalizes_multiple_pending_changes_in_order() { #[test] fn force_change_to_new_set() { - let _ = env_logger::try_init(); + 
sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); // two of these guys are offline. let genesis_authorities = &[ @@ -1005,7 +1014,7 @@ fn voter_persists_its_votes() { use futures::future; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); // we have two authorities but we'll only be running the voter for alice @@ -1261,7 +1270,7 @@ fn voter_persists_its_votes() { #[test] fn finalize_3_voters_1_light_observer() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); let authorities = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(authorities); @@ -1306,7 +1315,7 @@ fn finalize_3_voters_1_light_observer() { #[test] fn finality_proof_is_fetched_by_light_client_when_consensus_data_changes() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); let peers = &[Ed25519Keyring::Alice]; @@ -1336,7 +1345,7 @@ fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_differ // for debug: to ensure that without forced change light client will sync finality proof const FORCE_CHANGE: bool = true; - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); // two of these guys are offline. @@ -1400,7 +1409,7 @@ fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_differ #[test] fn voter_catches_up_to_latest_round_when_behind() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index 917052041ba38e3e33ebe2b79a5fdee77c4104e8..871cc3ef426ec5fc26bff690258a3b2c3d3df7de 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "sc-informant" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "Substrate informant." 
edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,10 +17,10 @@ ansi_term = "0.12.1" futures = "0.3.4" log = "0.4.8" parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sc-network = { version = "0.8.0-rc5", path = "../network" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc2", path = "../../primitives/utils" } -sp-transaction-pool = { version = "2.0.0-rc2", path = "../../primitives/transaction-pool" } +sc-client-api = { version = "2.0.0", path = "../api" } +sc-network = { version = "0.8.0", path = "../network" } +sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0", path = "../../primitives/utils" } +sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } wasm-timer = "0.2" diff --git a/client/informant/README.md b/client/informant/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b494042590a4218aaa9c63d5e4b3c8a09a804b5b --- /dev/null +++ b/client/informant/README.md @@ -0,0 +1,3 @@ +Console informant. Prints sync progress and block events. Runs on the calling thread. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index 3daf29a9f78371d58ee34b996a0302a7fb9bd8a8..a1f0ba9ae5fac3cc9dbceb78bd3b22ff32821167 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -23,7 +23,7 @@ use futures::prelude::*; use log::{info, trace, warn}; use parity_util_mem::MallocSizeOf; use sc_client_api::{BlockchainEvents, UsageProvider}; -use sc_network::{network_state::NetworkState, NetworkStatus}; +use sc_network::NetworkStatus; use sp_blockchain::HeaderMetadata; use sp_runtime::traits::{Block as BlockT, Header}; use sp_transaction_pool::TransactionPool; @@ -81,7 +81,7 @@ impl TransactionPoolAndMaybeMallogSizeOf for /// Builds the informant and returns a `Future` that drives the informant. 
pub fn build( client: Arc, - network_status_sinks: Arc, NetworkState)>>, + network_status_sinks: Arc>>, pool: Arc, format: OutputFormat, ) -> impl futures::Future @@ -96,7 +96,7 @@ where network_status_sinks.push(Duration::from_millis(5000), network_status_sink); let display_notifications = network_status_stream - .for_each(move |(net_status, _)| { + .for_each(move |net_status| { let info = client_1.usage_info(); if let Some(ref usage) = info.usage { trace!(target: "usage", "Usage statistics: {}", usage); diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index 0fe4ab5a9ba0e1ea4cf450de4a7e8a4faf86233d..e649fac7c78b1b1e618dcabcdca080961a4fef5b 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-keystore" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Keystore (and session key management) for ed25519 based chains like Polkadot." documentation = "https://docs.rs/sc-keystore" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-application-crypto = { version = "2.0.0-rc5", path = "../../primitives/application-crypto" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-application-crypto = { version = "2.0.0", path = "../../primitives/application-crypto" } hex = "0.4.0" merlin = { version = "2.0", default-features = false } parking_lot = "0.10.0" diff --git a/client/keystore/README.md b/client/keystore/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9946a61d6fde65e8c0988d03c6d3e34b9d3d7bfb --- /dev/null +++ b/client/keystore/README.md @@ -0,0 +1,3 @@ +Keystore (and session key management) for ed25519 based chains like Polkadot. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/light/Cargo.toml b/client/light/Cargo.toml index 53e84ae31864129f4285694620b9493357a8f34a..d9fecb7aa8fa2c22e270d48a58269d17b37511d8 100644 --- a/client/light/Cargo.toml +++ b/client/light/Cargo.toml @@ -1,27 +1,28 @@ [package] description = "components for a light client" name = "sc-light" -version = "2.0.0-rc5" +version = "2.0.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-light" +readme = "README.md" [dependencies] parking_lot = "0.10.0" lazy_static = "1.4.0" hash-db = "0.15.2" -sp-runtime = { version = "2.0.0-rc2", path = "../../primitives/runtime" } -sp-externalities = { version = "0.8.0-rc2", path = "../../primitives/externalities" } -sp-blockchain = { version = "2.0.0-rc2", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc2", path = "../../primitives/core" } -sp-state-machine = { version = "0.8.0-rc2", path = "../../primitives/state-machine" } -sc-client-api = { version = "2.0.0-rc2", path = "../api" } -sp-api = { version = "2.0.0-rc2", path = "../../primitives/api" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } +sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } +sc-client-api = { version = "2.0.0", path = "../api" } +sp-api = { version = "2.0.0", path = "../../primitives/api" } codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-executor = { version = "0.8.0-rc2", path = "../executor" } +sc-executor = { version = "0.8.0", path = "../executor" } [features] default = [] diff --git a/client/light/README.md b/client/light/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1ba1f155b16527ba11e3c39f12364bf530bdf480 --- /dev/null +++ b/client/light/README.md @@ -0,0 +1,3 @@ +Light client components. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/light/src/blockchain.rs b/client/light/src/blockchain.rs index 9d557db887d2990549106b5436d0e043ca06a66b..3b5753f2849d51c8199d98debb99bf8e440386f0 100644 --- a/client/light/src/blockchain.rs +++ b/client/light/src/blockchain.rs @@ -29,7 +29,7 @@ use sp_blockchain::{ }; pub use sc_client_api::{ backend::{ - AuxStore, NewBlockState + AuxStore, NewBlockState, ProvideChtRoots, }, blockchain::{ Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache, @@ -173,3 +173,21 @@ impl RemoteBlockchain for Blockchain })) } } + +impl, Block: BlockT> ProvideChtRoots for Blockchain { + fn header_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> sp_blockchain::Result> { + self.storage().header_cht_root(cht_size, block) + } + + fn changes_trie_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> sp_blockchain::Result> { + self.storage().changes_trie_cht_root(cht_size, block) + } +} diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 90d606238b6a5560aa0bdfa9ab14e771ce64d0c3..53610650c00ebcb795b0f839d618fd4feb2d7341 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -1,13 +1,14 @@ [package] description = "Gossiping for the Substrate network protocol" name = "sc-network-gossip" -version = "0.8.0-rc5" +version = "0.8.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-network-gossip" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,15 +17,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.23.0", default-features = false } +libp2p = { version = "0.28.1", default-features = false } log = "0.4.8" lru = "0.4.3" -sc-network = { version = "0.8.0-rc5", path = "../network" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } +sc-network = { version = "0.8.0", path = "../network" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } wasm-timer = "0.2" [dev-dependencies] async-std = "1.6.2" quickcheck = "0.9.0" rand = "0.7.2" -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/network-gossip/README.md b/client/network-gossip/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9030fac056407aada04522af786058f8f75d8718 --- /dev/null +++ b/client/network-gossip/README.md @@ -0,0 +1,41 @@ +Polite gossiping. + +This crate provides gossiping capabilities on top of a network. + +Gossip messages are separated by two categories: "topics" and consensus engine ID. +The consensus engine ID is sent over the wire with the message, while the topic is not, +with the expectation that the topic can be derived implicitly from the content of the +message, assuming it is valid. + +Topics are a single 32-byte tag associated with a message, used to group those messages +in an opaque way. 
Consensus code can invoke `broadcast_topic` to attempt to send all messages +under a single topic to all peers who don't have them yet, and `send_topic` to +send all messages under a single topic to a specific peer. + +# Usage + +- Implement the `Network` trait, representing the low-level networking primitives. It is + already implemented on `sc_network::NetworkService`. +- Implement the `Validator` trait. See the section below. +- Decide on a `ConsensusEngineId`. Each gossiping protocol should have a different one. +- Build a `GossipEngine` using these three elements. +- Use the methods of the `GossipEngine` in order to send out messages and receive incoming + messages. + +# What is a validator? + +The primary role of a `Validator` is to process incoming messages from peers, and decide +whether to discard them or process them. It also decides whether to re-broadcast the message. + +The secondary role of the `Validator` is to check if a message is allowed to be sent to a given +peer. All messages, before being sent, will be checked against this filter. +This enables the validator to use information it's aware of about connected peers to decide +whether to send messages to them at any given moment in time - In particular, to wait until +peers can accept and process the message before sending it. + +Lastly, the fact that gossip validators can decide not to rebroadcast messages +opens the door for neighbor status packets to be baked into the gossip protocol. +These status packets will typically contain light pieces of information +used to inform peers of a current view of protocol state. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index df2a5c8e7e94a5432fbd418a2a41a4864a721ec2..70c2942597aa5f740ea95798770e96130874f11f 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -69,7 +69,7 @@ impl GossipEngine { pub fn new + Send + Clone + 'static>( network: N, engine_id: ConsensusEngineId, - protocol_name: impl Into>, + protocol_name: impl Into>, validator: Arc>, ) -> Self where B: 'static { // We grab the event stream before registering the notifications protocol, otherwise we @@ -333,7 +333,7 @@ mod tests { unimplemented!(); } - fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, [u8]>) {} + fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, str>) {} fn announce(&self, _: B::Hash, _: Vec) { unimplemented!(); @@ -362,7 +362,7 @@ mod tests { let mut gossip_engine = GossipEngine::::new( network.clone(), [1, 2, 3, 4], - "my_protocol".as_bytes(), + "my_protocol", Arc::new(AllowAll{}), ); @@ -390,7 +390,7 @@ mod tests { let mut gossip_engine = GossipEngine::::new( network.clone(), engine_id.clone(), - "my_protocol".as_bytes(), + "my_protocol", Arc::new(AllowAll{}), ); @@ -525,7 +525,7 @@ mod tests { let mut gossip_engine = GossipEngine::::new( network.clone(), engine_id.clone(), - "my_protocol".as_bytes(), + "my_protocol", Arc::new(TestValidator{}), ); diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 42aeca86cb275a9651e9b04c1a092d58790122dd..1d566ed3cbba299d63361d681f8b475ce299e319 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -87,7 +87,7 @@ pub trait Network { fn register_notifications_protocol( &self, engine_id: ConsensusEngineId, - protocol_name: Cow<'static, [u8]>, + protocol_name: Cow<'static, 
str>, ); /// Notify everyone we're connected to that we have the given block. @@ -117,7 +117,7 @@ impl Network for Arc> { fn register_notifications_protocol( &self, engine_id: ConsensusEngineId, - protocol_name: Cow<'static, [u8]>, + protocol_name: Cow<'static, str>, ) { NetworkService::register_notifications_protocol(self, engine_id, protocol_name) } diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 80a0f9e70bcbfcaee7f8bc812aa17ba2721bc45c..60c669ecb66801cf162aeda61ac9aae54873f7c4 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -489,7 +489,7 @@ mod tests { unimplemented!(); } - fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, [u8]>) {} + fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, str>) {} fn announce(&self, _: B::Hash, _: Vec) { unimplemented!(); diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 39598e2a88755f954a7b47151dafb51d7f19be9a..76d8f0a23eda2af5bbb77ea5b586b379e8330890 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -1,13 +1,14 @@ [package] description = "Substrate network protocol" name = "sc-network" -version = "0.8.0-rc5" +version = "0.8.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-network" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,6 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.6.1" [dependencies] +async-trait = "0.1" +async-std = { version = "1.6.2", features = ["unstable"] } bitflags = "1.2.0" bs58 = "0.3.1" bytes = "0.5.0" @@ -24,7 +27,7 @@ derive_more = "0.99.2" either = "1.5.3" erased-serde = "0.3.9" fnv = "1.0.6" -fork-tree = { version = "2.0.0-rc5", path = "../../utils/fork-tree" } +fork-tree = { version = "2.0.0", path = "../../utils/fork-tree" } futures = "0.3.4" futures-timer = "3.0.2" futures_codec = "0.4.0" @@ -37,23 +40,23 @@ lru = "0.4.0" nohash-hasher = "0.2.0" parking_lot = "0.10.0" pin-project = "0.4.6" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-rc5", path = "../../utils/prometheus" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0", path = "../../utils/prometheus" } prost = "0.6.1" rand = "0.7.2" -sc-block-builder = { version = "0.8.0-rc5", path = "../block-builder" } -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sc-peerset = { version = "2.0.0-rc5", path = "../peerset" } +sc-block-builder = { version = "0.8.0", path = "../block-builder" } +sc-client-api = { version = "2.0.0", path = "../api" } +sc-peerset = { version = "2.0.0", path = "../peerset" } serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" slog = { version = "2.5.2", features = ["nested-values"] } slog_derive = "0.2.0" smallvec = "0.6.10" -sp-arithmetic = { version = "2.0.0-rc5", path = "../../primitives/arithmetic" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc5", path = 
"../../primitives/utils" } +sp-arithmetic = { version = "2.0.0", path = "../../primitives/arithmetic" } +sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0", path = "../../primitives/utils" } thiserror = "1" unsigned-varint = { version = "0.4.0", features = ["futures", "futures-codec"] } void = "1.0.2" @@ -61,21 +64,20 @@ wasm-timer = "0.2" zeroize = "1.0.0" [dependencies.libp2p] -version = "0.23.0" +version = "0.28.1" default-features = false -features = ["identify", "kad", "mdns", "mplex", "noise", "ping", "tcp-async-std", "websocket", "yamux"] +features = ["identify", "kad", "mdns-async-std", "mplex", "noise", "ping", "request-response", "tcp-async-std", "websocket", "yamux"] [dev-dependencies] -async-std = "1.6.2" assert_matches = "1.3" -env_logger = "0.7.0" -libp2p = { version = "0.23.0", default-features = false, features = ["secio"] } +libp2p = { version = "0.28.1", default-features = false } quickcheck = "0.9.0" rand = "0.7.2" -sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } -sp-test-primitives = { version = "2.0.0-rc5", path = "../../primitives/test-primitives" } -substrate-test-runtime = { version = "2.0.0-rc5", path = "../../test-utils/runtime" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } +sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } +sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" } +sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } +substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } tempfile = "3.1.0" [features] diff --git a/client/network/README.md b/client/network/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e0bd691043bee3184be7745144fc06665cade72d --- /dev/null +++ b/client/network/README.md @@ -0,0 +1,226 @@ +Substrate-specific P2P networking. + +**Important**: This crate is unstable and the API and usage may change. + +# Node identities and addresses + +In a decentralized network, each node possesses a network private key and a network public key. +In Substrate, the keys are based on the ed25519 curve. + +From a node's public key, we can derive its *identity*. In Substrate and libp2p, a node's +identity is represented with the [`PeerId`] struct. All network communications between nodes on +the network use encryption derived from both sides's keys, which means that **identities cannot +be faked**. + +A node's identity uniquely identifies a machine on the network. If you start two or more +clients using the same network key, large interferences will happen. + +# Substrate's network protocol + +Substrate's networking protocol is based upon libp2p. It is at the moment not possible and not +planned to permit using something else than the libp2p network stack and the rust-libp2p +library. However the libp2p framework is very flexible and the rust-libp2p library could be +extended to support a wider range of protocols than what is offered by libp2p. 
+ +## Discovery mechanisms + +In order for our node to join a peer-to-peer network, it has to know a list of nodes that are +part of said network. This includes node identities and their addresses (how to reach them). +Building such a list is called the **discovery** mechanism. There are three mechanisms that +Substrate uses: + +- Bootstrap nodes. These are hard-coded node identities and addresses passed alongside +the network configuration. +- mDNS. We perform a UDP broadcast on the local network. Nodes that listen may respond with +their identity. More info [here](https://github.com/libp2p/specs/blob/master/discovery/mdns.md). +mDNS can be disabled in the network configuration. +- Kademlia random walk. Once connected, we perform random Kademlia `FIND_NODE` requests on the +configured Kademlia DHTs (one per configured chain protocol) in order for nodes to propagate to +us their view of the network. More information about Kademlia can be found [on +Wikipedia](https://en.wikipedia.org/wiki/Kademlia). + +## Connection establishment + +When node Alice knows node Bob's identity and address, it can establish a connection with Bob. +All connections must always use encryption and multiplexing. While some node addresses (e.g. +addresses using `/quic`) already imply which encryption and/or multiplexing to use, for others +the **multistream-select** protocol is used in order to negotiate an encryption layer and/or a +multiplexing layer. + +The connection establishment mechanism is called the **transport**. + +As of the writing of this documentation, the following base-layer protocols are supported by +Substrate: + +- TCP/IP for addresses of the form `/ip4/1.2.3.4/tcp/5`. Once the TCP connection is open, an +encryption and a multiplexing layer are negotiated on top. +- WebSockets for addresses of the form `/ip4/1.2.3.4/tcp/5/ws`. A TCP/IP connection is opened and +the WebSockets protocol is negotiated on top. Communications then happen inside WebSockets data +frames. Encryption and multiplexing are additionally negotiated again inside this channel. +- DNS for addresses of the form `/dns/example.com/tcp/5` or `/dns/example.com/tcp/5/ws`. A +node's address can contain a domain name. +- (All of the above using IPv6 instead of IPv4.) + +On top of the base-layer protocol, the [Noise](https://noiseprotocol.org/) protocol is +negotiated and applied. The exact handshake protocol is experimental and is subject to change. + +The following multiplexing protocols are supported: + +- [Mplex](https://github.com/libp2p/specs/tree/master/mplex). Support for mplex will likely +be deprecated in the future. +- [Yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). + +## Substreams + +Once a connection has been established and uses multiplexing, substreams can be opened. When +a substream is open, the **multistream-select** protocol is used to negotiate which protocol +to use on that given substream. + +Protocols that are specific to a certain chain have a `` in their name. This +"protocol ID" is defined in the chain specifications. For example, the protocol ID of Polkadot +is "dot". In the protocol names below, `` must be replaced with the corresponding +protocol ID. + +> **Note**: It is possible for the same connection to be used for multiple chains. For example, +> one can use both the `/dot/sync/2` and `/sub/sync/2` protocols on the same +> connection, provided that the remote supports them. + +Substrate uses the following standard libp2p protocols: + +- **`/ipfs/ping/1.0.0`**.
We periodically open an ephemeral substream in order to ping the +remote and check whether the connection is still alive. Failure for the remote to reply leads +to a disconnection. +- **[`/ipfs/id/1.0.0`](https://github.com/libp2p/specs/tree/master/identify)**. We +periodically open an ephemeral substream in order to ask information from the remote. +- **[`//kad`](https://github.com/libp2p/specs/pull/108)**. We periodically open +ephemeral substreams for Kademlia random walk queries. Each Kademlia query is done in a +separate substream. + +Additionally, Substrate uses the following non-libp2p-standard protocols: + +- **`/substrate//`** (where `` must be replaced with the +protocol ID of the targeted chain, and `` is a number between 2 and 6). For each +connection we optionally keep an additional substream for all Substrate-based communications alive. +This protocol is considered legacy, and is progressively being replaced with alternatives. +This is designated as "The legacy Substrate substream" in this documentation. See below for +more details. +- **`//sync/2`** is a request-response protocol (see below) that lets one perform +requests for information about blocks. Each request is the encoding of a `BlockRequest` and +each response is the encoding of a `BlockResponse`, as defined in the `api.v1.proto` file in +this source tree. +- **`//light/2`** is a request-response protocol (see below) that lets one perform +light-client-related requests for information about the state. Each request is the encoding of +a `light::Request` and each response is the encoding of a `light::Response`, as defined in the +`light.v1.proto` file in this source tree. +- **`//transactions/1`** is a notifications protocol (see below) where +transactions are pushed to other nodes. The handshake is empty on both sides. The message +format is a SCALE-encoded list of transactions, where each transaction is an opaque list of +bytes. +- **`//block-announces/1`** is a notifications protocol (see below) where +block announces are pushed to other nodes. The handshake is empty on both sides. The message +format is a SCALE-encoded tuple containing a block header followed with an opaque list of +bytes containing some data associated with this block announcement, e.g. a candidate message. +- Notifications protocols that are registered using the `register_notifications_protocol` +method. For example: `/paritytech/grandpa/1`. See below for more information. + +## The legacy Substrate substream + +Substrate uses a component named the **peerset manager (PSM)**. Through the discovery +mechanism, the PSM is aware of the nodes that are part of the network and decides which nodes +we should perform Substrate-based communications with. For these nodes, we open a connection +if necessary and open a unique substream for Substrate-based communications. If the PSM decides +that we should disconnect a node, then that substream is closed. + +For more information about the PSM, see the *sc-peerset* crate. + +Note that at the moment there is no mechanism in place to solve the issues that arise where the +two sides of a connection open the unique substream simultaneously. In order to not run into +issues, only the dialer of a connection is allowed to open the unique substream. When the +substream is closed, the entire connection is closed as well. This is a bug that will be +resolved by deprecating the protocol entirely. 
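The messages exchanged on this substream are SCALE-encoded, as described in the next paragraph. As a rough, self-contained illustration of what that encoding looks like (using a hypothetical message type rather than one of the real types from `message.rs`, and assuming the `parity-scale-codec` crate with its `derive` feature enabled):

```rust
use parity_scale_codec::{Decode, Encode};

// Hypothetical message, for illustration only; the real protocol messages
// are defined in this crate's `message.rs`.
#[derive(Encode, Decode, PartialEq, Debug)]
struct ExampleStatus {
    version: u32,
    best_number: u64,
}

fn main() {
    let status = ExampleStatus { version: 6, best_number: 1234 };

    // SCALE is a compact, non-self-describing encoding: both sides must agree
    // on the exact message layout in advance.
    let bytes = status.encode();
    let decoded = ExampleStatus::decode(&mut &bytes[..]).expect("valid encoding");
    assert_eq!(status, decoded);
}
```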
+ +Within the unique Substrate substream, messages encoded using +[*parity-scale-codec*](https://github.com/paritytech/parity-scale-codec) are exchanged. +The details of these messages are not yet fully settled, but they can be found in the +`message.rs` file. + +Once the substream is open, the first step is an exchange of a *status* message from both +sides, containing information such as the chain root hash, head of chain, and so on. + +Communications within this substream include: + +- Syncing. Blocks are announced and requested from other nodes. +- Light-client requests. When a light client requires information, a random node we have a +substream open with is chosen, and the information is requested from it. +- Gossiping. Used for example by grandpa. + +## Request-response protocols + +A so-called request-response protocol is defined as follows: + +- When a substream is opened, the opening side sends a message whose content is +protocol-specific. The message must be prefixed with an +[LEB128-encoded number](https://en.wikipedia.org/wiki/LEB128) indicating its length. After the +message has been sent, the writing side is closed. +- The remote sends back the response prefixed with a LEB128-encoded length, and closes its +side as well. + +Each request is performed in a new separate substream. + +## Notifications protocols + +A so-called notifications protocol is defined as follows: + +- When a substream is opened, the opening side sends a handshake message whose content is +protocol-specific. The handshake message must be prefixed with an +[LEB128-encoded number](https://en.wikipedia.org/wiki/LEB128) indicating its length. The +handshake message can be of length 0, in which case the sender has to send a single `0`. +- The receiver then either immediately closes the substream, or answers with its own +LEB128-prefixed protocol-specific handshake response. The message can be of length 0, in which +case a single `0` has to be sent back. +- Once the handshake has completed, the notifications protocol is unidirectional. Only the +node which initiated the substream can push notifications. If the remote wants to send +notifications as well, it has to open its own unidirectional substream. +- Each notification must be prefixed with an LEB128-encoded length. The encoding of the +messages is specific to each protocol. +- Either party can signal that it doesn't want a notifications substream anymore by closing +its writing side. The other party should respond by closing its own writing side soon after. + +The API of `sc-network` allows one to register user-defined notification protocols. +`sc-network` automatically tries to open a substream towards each node for which the legacy +Substrate substream is open. The handshake is then performed automatically. + +For example, the `sc-finality-grandpa` crate registers the `/paritytech/grandpa/1` +notifications protocol. + +At the moment, for backwards-compatibility, notification protocols are tied to the legacy +Substrate substream. Additionally, the handshake message is hardcoded to be a single 8-bit +integer representing the role of the node: + +- 1 for a full node. +- 2 for a light node. +- 4 for an authority. + +In the future, though, these restrictions will be removed. + +# Usage + +Using the `sc-network` crate is done through the [`NetworkWorker`] struct. Create this +struct by passing a [`config::Params`], then poll it as if it were a `Future`.
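A rough sketch of that driving step, assuming a `NetworkWorker` has already been built from a `config::Params` elsewhere (constructing the `Params` requires a client, transaction pool and import queue, which are out of scope here):

```rust
use sc_network::{ExHashT, NetworkWorker};
use sp_runtime::traits::Block as BlockT;

// Hedged sketch: drives an already-constructed worker. Nothing happens on the
// network unless the worker is polled, typically by spawning it on an executor.
async fn run_network<B: BlockT + 'static, H: ExHashT>(worker: NetworkWorker<B, H>) {
    // Keep a handle to the service before the worker is consumed; it can be
    // cloned and shared with other parts of the node.
    let _service = worker.service().clone();

    // Poll the worker until it terminates.
    worker.await;
}
```

In practice this wiring is performed by `sc-service`; the sketch only shows the shape of the API.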
You can extract an +`Arc` from the `NetworkWorker`, which can be shared amongst multiple places +in order to give orders to the networking. + +See the [`config`] module for more information about how to configure the networking. + +After the `NetworkWorker` has been created, the important things to do are: + +- Calling `NetworkWorker::poll` in order to advance the network. This can be done by +dispatching a background task with the [`NetworkWorker`]. +- Calling `on_block_import` whenever a block is added to the client. +- Calling `on_block_finalized` whenever a block is finalized. +- Calling `trigger_repropagate` when a transaction is added to the pool. + +More precise usage details are still being worked on and will likely change in the future. + + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index a43c61054d974b06d47d11509f3cd9a745dab5bd..6b3cfac38ae993139fd3b23f16c7be97340f4c67 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -16,7 +16,7 @@ use crate::{ config::{ProtocolId, Role}, block_requests, light_client_handler, finality_requests, - peer_info, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, + peer_info, request_responses, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, protocol::{message::{self, Roles}, CustomMessageOutcome, NotificationsSink, Protocol}, ObservedRole, DhtEvent, ExHashT, }; @@ -39,6 +39,10 @@ use std::{ time::Duration, }; +pub use crate::request_responses::{ + ResponseFailure, InboundFailure, RequestFailure, OutboundFailure, RequestId, SendRequestError +}; + /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] #[behaviour(out_event = "BehaviourOut", poll_method = "poll")] @@ -50,6 +54,8 @@ pub struct Behaviour { peer_info: peer_info::PeerInfoBehaviour, /// Discovers nodes of the network. discovery: DiscoveryBehaviour, + /// Generic request-reponse protocols. + request_responses: request_responses::RequestResponsesBehaviour, /// Block request handling. block_requests: block_requests::BlockRequests, /// Finality proof request handling. @@ -76,26 +82,44 @@ pub enum BehaviourOut { RandomKademliaStarted(ProtocolId), /// We have received a request from a peer and answered it. - AnsweredRequest { + /// + /// This event is generated for statistics purposes. + InboundRequest { /// Peer which sent us a request. peer: PeerId, /// Protocol name of the request. - protocol: Vec, - /// Time it took to build the response. - build_time: Duration, + protocol: Cow<'static, str>, + /// If `Ok`, contains the time elapsed between when we received the request and when we + /// sent back the response. If `Err`, the error that happened. + result: Result, + }, + + /// A request initiated using [`Behaviour::send_request`] has succeeded or failed. + RequestFinished { + /// Request that has succeeded. + request_id: RequestId, + /// Response sent by the remote or reason for failure. + result: Result, RequestFailure>, }, + /// Started a new request with the given node. - RequestStarted { + /// + /// This event is for statistics purposes only. The request and response handling are entirely + /// internal to the behaviour. + OpaqueRequestStarted { peer: PeerId, /// Protocol name of the request. - protocol: Vec, + protocol: String, }, /// Finished, successfully or not, a previously-started request. 
- RequestFinished { + /// + /// This event is for statistics purposes only. The request and response handling are entirely + /// internal to the behaviour. + OpaqueRequestFinished { /// Who we were requesting. peer: PeerId, /// Protocol name of the request. - protocol: Vec, + protocol: String, /// How long before the response came or the request got cancelled. request_duration: Duration, }, @@ -161,17 +185,20 @@ impl Behaviour { finality_proof_requests: finality_requests::FinalityProofRequests, light_client_handler: light_client_handler::LightClientHandler, disco_config: DiscoveryConfig, - ) -> Self { - Behaviour { + request_response_protocols: Vec, + ) -> Result { + Ok(Behaviour { substrate, peer_info: peer_info::PeerInfoBehaviour::new(user_agent, local_public_key), discovery: disco_config.finish(), + request_responses: + request_responses::RequestResponsesBehaviour::new(request_response_protocols.into_iter())?, block_requests, finality_proof_requests, light_client_handler, events: VecDeque::new(), role, - } + }) } /// Returns the list of nodes that we know exist in the network. @@ -184,9 +211,12 @@ impl Behaviour { self.discovery.add_known_address(peer_id, addr) } - /// Returns the number of nodes that are in the Kademlia k-buckets. - pub fn num_kbuckets_entries(&mut self) -> impl ExactSizeIterator { - self.discovery.num_kbuckets_entries() + /// Returns the number of nodes in each Kademlia kbucket for each Kademlia instance. + /// + /// Identifies Kademlia instances by their [`ProtocolId`] and kbuckets by the base 2 logarithm + /// of their lower bound. + pub fn num_entries_per_kbucket(&mut self) -> impl ExactSizeIterator)> { + self.discovery.num_entries_per_kbucket() } /// Returns the number of records in the Kademlia record stores. @@ -208,6 +238,16 @@ impl Behaviour { self.peer_info.node(peer_id) } + /// Initiates sending a request. + /// + /// An error is returned if we are not connected to the target peer of if the protocol doesn't + /// match one that has been registered. + pub fn send_request(&mut self, target: &PeerId, protocol: &str, request: Vec) + -> Result + { + self.request_responses.send_request(target, protocol, request) + } + /// Registers a new notifications protocol. /// /// Please call `event_stream` before registering a protocol, otherwise you may miss events @@ -218,7 +258,7 @@ impl Behaviour { pub fn register_notifications_protocol( &mut self, engine_id: ConsensusEngineId, - protocol_name: impl Into>, + protocol_name: impl Into>, ) { // This is the message that we will send to the remote as part of the initial handshake. // At the moment, we force this to be an encoded `Roles`. @@ -298,20 +338,20 @@ Behaviour { CustomMessageOutcome::BlockRequest { target, request } => { match self.block_requests.send_request(&target, request) { block_requests::SendRequestOutcome::Ok => { - self.events.push_back(BehaviourOut::RequestStarted { + self.events.push_back(BehaviourOut::OpaqueRequestStarted { peer: target, - protocol: self.block_requests.protocol_name().to_vec(), + protocol: self.block_requests.protocol_name().to_owned(), }); }, block_requests::SendRequestOutcome::Replaced { request_duration, .. 
} => { - self.events.push_back(BehaviourOut::RequestFinished { + self.events.push_back(BehaviourOut::OpaqueRequestFinished { peer: target.clone(), - protocol: self.block_requests.protocol_name().to_vec(), + protocol: self.block_requests.protocol_name().to_owned(), request_duration, }); - self.events.push_back(BehaviourOut::RequestStarted { + self.events.push_back(BehaviourOut::OpaqueRequestStarted { peer: target, - protocol: self.block_requests.protocol_name().to_vec(), + protocol: self.block_requests.protocol_name().to_owned(), }); } block_requests::SendRequestOutcome::NotConnected | @@ -358,20 +398,41 @@ Behaviour { } } +impl NetworkBehaviourEventProcess for Behaviour { + fn inject_event(&mut self, event: request_responses::Event) { + match event { + request_responses::Event::InboundRequest { peer, protocol, result } => { + self.events.push_back(BehaviourOut::InboundRequest { + peer, + protocol, + result, + }); + } + + request_responses::Event::RequestFinished { request_id, result } => { + self.events.push_back(BehaviourOut::RequestFinished { + request_id, + result, + }); + }, + } + } +} + impl NetworkBehaviourEventProcess> for Behaviour { fn inject_event(&mut self, event: block_requests::Event) { match event { block_requests::Event::AnsweredRequest { peer, total_handling_time } => { - self.events.push_back(BehaviourOut::AnsweredRequest { + self.events.push_back(BehaviourOut::InboundRequest { peer, - protocol: self.block_requests.protocol_name().to_vec(), - build_time: total_handling_time, + protocol: self.block_requests.protocol_name().to_owned().into(), + result: Ok(total_handling_time), }); }, block_requests::Event::Response { peer, original_request: _, response, request_duration } => { - self.events.push_back(BehaviourOut::RequestFinished { + self.events.push_back(BehaviourOut::OpaqueRequestFinished { peer: peer.clone(), - protocol: self.block_requests.protocol_name().to_vec(), + protocol: self.block_requests.protocol_name().to_owned(), request_duration, }); let ev = self.substrate.on_block_response(peer, response); @@ -381,9 +442,9 @@ impl NetworkBehaviourEventProcess { // There doesn't exist any mechanism to report cancellations or timeouts yet, so // we process them by disconnecting the node. - self.events.push_back(BehaviourOut::RequestFinished { + self.events.push_back(BehaviourOut::OpaqueRequestFinished { peer: peer.clone(), - protocol: self.block_requests.protocol_name().to_vec(), + protocol: self.block_requests.protocol_name().to_owned(), request_duration, }); self.substrate.on_block_request_failed(&peer); diff --git a/client/network/src/block_requests.rs b/client/network/src/block_requests.rs index d7a12816dd4e1a4923bb95e49d1a482fd86fb874..7ee8f18f3a26f74c2f08e036bbe6ebb10cba73e6 100644 --- a/client/network/src/block_requests.rs +++ b/client/network/src/block_requests.rs @@ -124,7 +124,7 @@ pub struct Config { max_response_len: usize, inactivity_timeout: Duration, request_timeout: Duration, - protocol: Bytes, + protocol: String, } impl Config { @@ -143,7 +143,7 @@ impl Config { max_response_len: 16 * 1024 * 1024, inactivity_timeout: Duration::from_secs(15), request_timeout: Duration::from_secs(40), - protocol: Bytes::new(), + protocol: String::new(), }; c.set_protocol(id); c @@ -184,11 +184,11 @@ impl Config { /// Set protocol to use for upgrade negotiation. 
pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { - let mut v = Vec::new(); - v.extend_from_slice(b"/"); - v.extend_from_slice(id.as_bytes()); - v.extend_from_slice(b"/sync/2"); - self.protocol = v.into(); + let mut s = String::new(); + s.push_str("/"); + s.push_str(id.as_ref()); + s.push_str("/sync/2"); + self.protocol = s; self } } @@ -258,7 +258,7 @@ where } /// Returns the libp2p protocol name used on the wire (e.g. `/foo/sync/2`). - pub fn protocol_name(&self) -> &[u8] { + pub fn protocol_name(&self) -> &str { &self.config.protocol } @@ -322,7 +322,7 @@ where request: buf, original_request: req, max_response_size: self.config.max_response_len, - protocol: self.config.protocol.clone(), + protocol: self.config.protocol.as_bytes().to_vec().into(), }, }); @@ -472,13 +472,13 @@ where fn new_handler(&mut self) -> Self::ProtocolsHandler { let p = InboundProtocol { max_request_len: self.config.max_request_len, - protocol: self.config.protocol.clone(), + protocol: self.config.protocol.as_bytes().to_owned().into(), marker: PhantomData, }; let mut cfg = OneShotHandlerConfig::default(); cfg.keep_alive_timeout = self.config.inactivity_timeout; cfg.outbound_substream_timeout = self.config.request_timeout; - OneShotHandler::new(SubstreamProtocol::new(p), cfg) + OneShotHandler::new(SubstreamProtocol::new(p, ()), cfg) } fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 94b2993b4e6bd255e332de67d0219ecf90ff8a0a..7445ea0534bb71a2637330060a0b145bad86b03f 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -23,6 +23,7 @@ pub use crate::chain::{Client, FinalityProofProvider}; pub use crate::on_demand_layer::{AlwaysBadChecker, OnDemand}; +pub use crate::request_responses::{IncomingRequest, ProtocolConfig as RequestResponseConfig}; pub use libp2p::{identity, core::PublicKey, wasm_ext::ExtTransport, build_multiaddr}; // Note: this re-export shouldn't be part of the public API of the crate and will be removed in @@ -34,9 +35,10 @@ use crate::ExHashT; use core::{fmt, iter}; use futures::future; -use libp2p::identity::{ed25519, Keypair}; -use libp2p::wasm_ext; -use libp2p::{multiaddr, Multiaddr, PeerId}; +use libp2p::{ + identity::{ed25519, Keypair}, + multiaddr, wasm_ext, Multiaddr, PeerId, +}; use prometheus_endpoint::Registry; use sp_consensus::{block_validation::BlockAnnounceValidator, import_queue::ImportQueue}; use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; @@ -48,6 +50,7 @@ use std::{ io::{self, Write}, net::Ipv4Addr, path::{Path, PathBuf}, + str, sync::Arc, }; use zeroize::Zeroize; @@ -233,20 +236,26 @@ impl TransactionPool for EmptyTransaction fn transaction(&self, _h: &H) -> Option { None } } -/// Name of a protocol, transmitted on the wire. Should be unique for each chain. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] +/// Name of a protocol, transmitted on the wire. Should be unique for each chain. Always UTF-8. +#[derive(Clone, PartialEq, Eq, Hash)] pub struct ProtocolId(smallvec::SmallVec<[u8; 6]>); -impl<'a> From<&'a [u8]> for ProtocolId { - fn from(bytes: &'a [u8]) -> ProtocolId { - ProtocolId(bytes.into()) +impl<'a> From<&'a str> for ProtocolId { + fn from(bytes: &'a str) -> ProtocolId { + ProtocolId(bytes.as_bytes().into()) } } -impl ProtocolId { - /// Exposes the `ProtocolId` as bytes. 
- pub fn as_bytes(&self) -> &[u8] { - self.0.as_ref() +impl AsRef for ProtocolId { + fn as_ref(&self) -> &str { + str::from_utf8(&self.0[..]) + .expect("the only way to build a ProtocolId is through a UTF-8 String; qed") + } +} + +impl fmt::Debug for ProtocolId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(self.as_ref(), f) } } @@ -272,21 +281,8 @@ pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> { /// Splits a Multiaddress into a Multiaddress and PeerId. pub fn parse_addr(mut addr: Multiaddr)-> Result<(PeerId, Multiaddr), ParseErr> { let who = match addr.pop() { - Some(multiaddr::Protocol::P2p(key)) => { - if !matches!(key.algorithm(), multiaddr::multihash::Code::Identity) { - // (note: this is the "person bowing" emoji) - log::warn!( - "🙇 You are using the peer ID {}. This peer ID uses a legacy, deprecated \ - representation that will no longer be supported in the future. \ - Please refresh it by performing a RPC query to the appropriate node, \ - by looking at its logs, or by using `subkey inspect-node-key` on its \ - private key.", - bs58::encode(key.as_bytes()).into_string() - ); - } - - PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)? - }, + Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) + .map_err(|_| ParseErr::InvalidPeerId)?, _ => return Err(ParseErr::PeerIdMissing), }; @@ -406,7 +402,9 @@ pub struct NetworkConfiguration { pub node_key: NodeKeyConfig, /// List of notifications protocols that the node supports. Must also include a /// `ConsensusEngineId` for backwards-compatibility. - pub notifications_protocols: Vec<(ConsensusEngineId, Cow<'static, [u8]>)>, + pub notifications_protocols: Vec<(ConsensusEngineId, Cow<'static, str>)>, + /// List of request-response protocols that the node supports. + pub request_response_protocols: Vec, /// Maximum allowed number of incoming connections. pub in_peers: u32, /// Number of outgoing connections we're trying to maintain. @@ -442,6 +440,7 @@ impl NetworkConfiguration { boot_nodes: Vec::new(), node_key, notifications_protocols: Vec::new(), + request_response_protocols: Vec::new(), in_peers: 25, out_peers: 75, reserved_nodes: Vec::new(), @@ -458,9 +457,7 @@ impl NetworkConfiguration { allow_non_globals_in_dht: false, } } -} -impl NetworkConfiguration { /// Create new default configuration for localhost-only connection with random port (useful for testing) pub fn new_local() -> NetworkConfiguration { let mut config = NetworkConfiguration::new( @@ -615,10 +612,26 @@ impl NodeKeyConfig { Ok(Keypair::Ed25519(k.into())), Ed25519(Secret::File(f)) => - get_secret(f, - |mut b| ed25519::SecretKey::from_bytes(&mut b), + get_secret( + f, + |mut b| { + match String::from_utf8(b.to_vec()) + .ok() + .and_then(|s|{ + if s.len() == 64 { + hex::decode(&s).ok() + } else { + None + }} + ) + { + Some(s) => ed25519::SecretKey::from_bytes(s), + _ => ed25519::SecretKey::from_bytes(&mut b), + } + }, ed25519::SecretKey::generate, - |b| b.as_ref().to_vec()) + |b| b.as_ref().to_vec() + ) .map(ed25519::Keypair::from) .map(Keypair::Ed25519), } diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index e349b08c41dc359f16e0e28503b9f069154cee74..6ef97708c1336c97821d6f6ad807a2c4ac8a6935 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -329,10 +329,18 @@ impl DiscoveryBehaviour { } } - /// Returns the number of nodes that are in the Kademlia k-buckets. 
- pub fn num_kbuckets_entries(&mut self) -> impl ExactSizeIterator { + /// Returns the number of nodes in each Kademlia kbucket for each Kademlia instance. + /// + /// Identifies Kademlia instances by their [`ProtocolId`] and kbuckets by the base 2 logarithm + /// of their lower bound. + pub fn num_entries_per_kbucket(&mut self) -> impl ExactSizeIterator)> { self.kademlias.iter_mut() - .map(|(id, kad)| (id, kad.kbuckets().map(|bucket| bucket.iter().count()).sum())) + .map(|(id, kad)| { + let buckets = kad.kbuckets() + .map(|bucket| (bucket.range().0.ilog2().unwrap_or(0), bucket.iter().count())) + .collect(); + (id, buckets) + }) } /// Returns the number of records in the Kademlia record stores. @@ -752,7 +760,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { // `DiscoveryBehaviour::new_handler` is still correct. fn protocol_name_from_protocol_id(id: &ProtocolId) -> Vec { let mut v = vec![b'/']; - v.extend_from_slice(id.as_bytes()); + v.extend_from_slice(id.as_ref().as_bytes()); v.extend_from_slice(b"/kad"); v } @@ -765,39 +773,30 @@ mod tests { use libp2p::{Multiaddr, PeerId}; use libp2p::core::upgrade; use libp2p::core::transport::{Transport, MemoryTransport}; - use libp2p::core::upgrade::{InboundUpgradeExt, OutboundUpgradeExt}; + use libp2p::noise; use libp2p::swarm::Swarm; + use libp2p::yamux; use std::{collections::HashSet, task::Poll}; use super::{DiscoveryConfig, DiscoveryOut, protocol_name_from_protocol_id}; #[test] fn discovery_working() { let mut first_swarm_peer_id_and_addr = None; - let protocol_id = ProtocolId::from(b"dot".as_ref()); + let protocol_id = ProtocolId::from("dot"); // Build swarms whose behaviour is `DiscoveryBehaviour`, each aware of // the first swarm via `with_user_defined`. let mut swarms = (0..25).map(|i| { let keypair = Keypair::generate_ed25519(); - let keypair2 = keypair.clone(); + + let noise_keys = noise::Keypair::::new() + .into_authentic(&keypair) + .unwrap(); let transport = MemoryTransport - .and_then(move |out, endpoint| { - let secio = libp2p::secio::SecioConfig::new(keypair2); - libp2p::core::upgrade::apply( - out, - secio, - endpoint, - upgrade::Version::V1 - ) - }) - .and_then(move |(peer_id, stream), endpoint| { - let peer_id2 = peer_id.clone(); - let upgrade = libp2p::yamux::Config::default() - .map_inbound(move |muxer| (peer_id, muxer)) - .map_outbound(move |muxer| (peer_id2, muxer)); - upgrade::apply(stream, upgrade, endpoint, upgrade::Version::V1) - }); + .upgrade(upgrade::Version::V1) + .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .multiplex(yamux::Config::default()); let behaviour = { let mut config = DiscoveryConfig::new(keypair.public()); @@ -877,8 +876,8 @@ mod tests { #[test] fn discovery_ignores_peers_with_unknown_protocols() { - let supported_protocol_id = ProtocolId::from(b"a".as_ref()); - let unsupported_protocol_id = ProtocolId::from(b"b".as_ref()); + let supported_protocol_id = ProtocolId::from("a"); + let unsupported_protocol_id = ProtocolId::from("b"); let mut discovery = { let keypair = Keypair::generate_ed25519(); @@ -929,8 +928,8 @@ mod tests { #[test] fn discovery_adds_peer_to_kademlia_of_same_protocol_only() { - let protocol_a = ProtocolId::from(b"a".as_ref()); - let protocol_b = ProtocolId::from(b"b".as_ref()); + let protocol_a = ProtocolId::from("a"); + let protocol_b = ProtocolId::from("b"); let mut discovery = { let keypair = Keypair::generate_ed25519(); diff --git a/client/network/src/error.rs b/client/network/src/error.rs index 
d5a4024ef53d7ff1b2617ab9fad4fa1f73cb853c..7d7603ce92aab947b5fa581721b9d32dae9a964c 100644 --- a/client/network/src/error.rs +++ b/client/network/src/error.rs @@ -21,7 +21,7 @@ use crate::config::TransportConfig; use libp2p::{PeerId, Multiaddr}; -use std::fmt; +use std::{borrow::Cow, fmt}; /// Result type alias for the network. pub type Result = std::result::Result; @@ -61,6 +61,12 @@ pub enum Error { /// The invalid addresses. addresses: Vec, }, + /// The same request-response protocol has been registered multiple times. + #[display(fmt = "Request-response protocol registered multiple times: {}", protocol)] + DuplicateRequestResponseProtocol { + /// Name of the protocol registered multiple times. + protocol: Cow<'static, str>, + }, } // Make `Debug` use the `Display` implementation. @@ -78,6 +84,7 @@ impl std::error::Error for Error { Error::DuplicateBootnode { .. } => None, Error::Prometheus(ref err) => Some(err), Error::AddressesForAnotherTransport { .. } => None, + Error::DuplicateRequestResponseProtocol { .. } => None, } } } diff --git a/client/network/src/finality_requests.rs b/client/network/src/finality_requests.rs index de737cdd20a4e8f2b4d4db17b7404a01cf2c9bd3..55f56b9a0cc25c582f122e00d5fa3c933b508f14 100644 --- a/client/network/src/finality_requests.rs +++ b/client/network/src/finality_requests.rs @@ -129,7 +129,7 @@ impl Config { pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { let mut v = Vec::new(); v.extend_from_slice(b"/"); - v.extend_from_slice(id.as_bytes()); + v.extend_from_slice(id.as_ref().as_bytes()); v.extend_from_slice(b"/finality-proof/1"); self.protocol = v.into(); self @@ -235,7 +235,7 @@ where }; let mut cfg = OneShotHandlerConfig::default(); cfg.keep_alive_timeout = self.config.inactivity_timeout; - OneShotHandler::new(SubstreamProtocol::new(p), cfg) + OneShotHandler::new(SubstreamProtocol::new(p, ()), cfg) } fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { diff --git a/client/network/src/gossip.rs b/client/network/src/gossip.rs new file mode 100644 index 0000000000000000000000000000000000000000..0650e7a2f818b75b594e81191c5a4f7732fe7988 --- /dev/null +++ b/client/network/src/gossip.rs @@ -0,0 +1,245 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Helper for sending rate-limited gossip messages. +//! +//! # Context +//! +//! The [`NetworkService`] struct provides a way to send notifications to a certain peer through +//! the [`NetworkService::notification_sender`] method. This method is quite low level and isn't +//! expected to be used directly. +//! +//! The [`QueuedSender`] struct provided by this module is built on top of +//! [`NetworkService::notification_sender`] and provides a cleaner way to send notifications. +//! +//! # Behaviour +//! +//! 
An instance of [`QueuedSender`] is specific to a certain combination of `PeerId` and +//! protocol name. It maintains a buffer of messages waiting to be sent out. The user of this API +//! is able to manipulate that queue, adding or removing obsolete messages. +//! +//! Creating a [`QueuedSender`] also returns an opaque `Future` whose responsibility is to +//! drain that queue and actually send the messages. If the substream with the given combination +//! of peer and protocol is closed, the queue is silently discarded. It is the role of the user +//! to track which peers we are connected to. +//! +//! In normal situations, messages sent through a [`QueuedSender`] will arrive in the same +//! order as they have been sent. +//! It is possible, in the event of disconnects and reconnects, that messages arrive in a +//! different order. See also https://github.com/paritytech/substrate/issues/6756. +//! However, if multiple instances of [`QueuedSender`] exist for the same peer and protocol, or +//! if some other code uses the [`NetworkService`] to send notifications to this combination of +//! peer and protocol, then the notifications will be interleaved in an unpredictable way. +//! + +use crate::{ExHashT, NetworkService}; + +use async_std::sync::{Condvar, Mutex, MutexGuard}; +use futures::prelude::*; +use libp2p::PeerId; +use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use std::{ + collections::VecDeque, + fmt, + sync::{atomic, Arc}, + time::Duration, +}; + +#[cfg(test)] +mod tests; + +/// Notifications sender for a specific combination of network service, peer, and protocol. +pub struct QueuedSender { + /// Shared between the front and the back task. + shared: Arc>, +} + +impl QueuedSender { + /// Returns a new [`QueuedSender`] containing a queue of messages for this specific + /// combination of peer and protocol. + /// + /// In addition to the [`QueuedSender`], also returns a `Future` whose role is to drive + /// the sending of messages forward. + pub fn new( + service: Arc>, + peer_id: PeerId, + protocol: ConsensusEngineId, + queue_size_limit: usize, + messages_encode: F + ) -> (Self, impl Future + Send + 'static) + where + M: Send + 'static, + B: BlockT + 'static, + H: ExHashT, + F: Fn(M) -> Vec + Send + 'static, + { + let shared = Arc::new(Shared { + stop_task: atomic::AtomicBool::new(false), + condvar: Condvar::new(), + queue_size_limit, + messages_queue: Mutex::new(VecDeque::with_capacity(queue_size_limit)), + }); + + let task = spawn_task( + service, + peer_id, + protocol, + shared.clone(), + messages_encode + ); + + (QueuedSender { shared }, task) + } + + /// Locks the queue of messages towards this peer. + /// + /// The returned `Future` is expected to be ready quite quickly. + pub async fn lock_queue<'a>(&'a self) -> QueueGuard<'a, M> { + QueueGuard { + messages_queue: self.shared.messages_queue.lock().await, + condvar: &self.shared.condvar, + queue_size_limit: self.shared.queue_size_limit, + } + } + + /// Pushes a message to the queue, or discards it if the queue is full. + /// + /// The returned `Future` is expected to be ready quite quickly.
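A minimal usage sketch of the queueing API documented in the module docs above, modelled on the `basic_works` test added later in this diff; the `service` handle, peer id and `*b"foo\0"` engine id are placeholders:

    // `|msg| msg` is the encoding closure: queued messages here are already
    // `Vec<u8>`, so the identity function is enough.
    let (sender, bg_future) = QueuedSender::new(
        service.clone(),   // Arc<NetworkService<_, _>>
        peer_id.clone(),   // peer the notifications are addressed to
        *b"foo\0",         // ConsensusEngineId of the notifications protocol
        256,               // queue_size_limit
        |msg: Vec<u8>| msg,
    );
    // Nothing is sent until the returned future is driven to completion somewhere.
    async_std::task::spawn(bg_future);
    // Messages pushed while the queue is at its size limit are silently dropped.
    sender.queue_or_discard(b"hello".to_vec()).await;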
+ pub async fn queue_or_discard(&self, message: M) + where + M: Send + 'static + { + self.lock_queue().await.push_or_discard(message); + } +} + +impl fmt::Debug for QueuedSender { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("QueuedSender").finish() + } +} + +impl Drop for QueuedSender { + fn drop(&mut self) { + // The "clean" way to notify the `Condvar` here is normally to first lock the `Mutex`, + // then notify the `Condvar` while the `Mutex` is locked. Unfortunately, the `Mutex` + // being asynchronous, it can't reasonably be locked from within a destructor. + // See also the corresponding code in the background task. + self.shared.stop_task.store(true, atomic::Ordering::Release); + self.shared.condvar.notify_all(); + } +} + +/// Locked queue of messages to the given peer. +/// +/// As long as this struct exists, the background task is asleep and the owner of the [`QueueGuard`] +/// is in total control of the buffer. Messages can only ever be sent out after the [`QueueGuard`] +/// is dropped. +#[must_use] +pub struct QueueGuard<'a, M> { + messages_queue: MutexGuard<'a, VecDeque>, + condvar: &'a Condvar, + /// Same as [`Shared::queue_size_limit`]. + queue_size_limit: usize, +} + +impl<'a, M: Send + 'static> QueueGuard<'a, M> { + /// Pushes a message to the queue, or discards it if the queue is full. + /// + /// The message will only start being sent out after the [`QueueGuard`] is dropped. + pub fn push_or_discard(&mut self, message: M) { + if self.messages_queue.len() < self.queue_size_limit { + self.messages_queue.push_back(message); + } + } + + /// Calls `filter` for each message in the queue, and removes the ones for which `false` is + /// returned. + /// + /// > **Note**: The parameter of `filter` is a `&M` and not a `&mut M` (which would be + /// > better) because the underlying implementation relies on `VecDeque::retain`. + pub fn retain(&mut self, filter: impl FnMut(&M) -> bool) { + self.messages_queue.retain(filter); + } +} + +impl<'a, M> Drop for QueueGuard<'a, M> { + fn drop(&mut self) { + // We notify the `Condvar` in the destructor in order to be able to push multiple + // messages and wake up the background task only once afterwards. + self.condvar.notify_one(); + } +} + +#[derive(Debug)] +struct Shared { + /// Read by the background task after locking `locked`. If true, the task stops. + stop_task: atomic::AtomicBool, + /// Queue of messages waiting to be sent out. + messages_queue: Mutex>, + /// Must be notified every time the content of `locked` changes. + condvar: Condvar, + /// Maximum number of elements in `messages_queue`. + queue_size_limit: usize, +} + +async fn spawn_task Vec>( + service: Arc>, + peer_id: PeerId, + protocol: ConsensusEngineId, + shared: Arc>, + messages_encode: F, +) { + loop { + let next_message = 'next_msg: loop { + let mut queue = shared.messages_queue.lock().await; + + loop { + if shared.stop_task.load(atomic::Ordering::Acquire) { + return; + } + + if let Some(msg) = queue.pop_front() { + break 'next_msg msg; + } + + // It is possible that the destructor of `QueuedSender` sets `stop_task` to + // true and notifies the `Condvar` after the background task loads `stop_task` + // and before it calls `Condvar::wait`. + // See also the corresponding comment in `QueuedSender::drop`. + // For this reason, we use `wait_timeout`. In the worst case scenario, + // `stop_task` will always be checked again after the timeout is reached. 
+ queue = shared.condvar.wait_timeout(queue, Duration::from_secs(10)).await.0; + } + }; + + // Starting from below, we try to send the message. If an error happens when sending, + // the only sane option we have is to silently discard the message. + let sender = match service.notification_sender(peer_id.clone(), protocol) { + Ok(s) => s, + Err(_) => continue, + }; + + let ready = match sender.ready().await { + Ok(r) => r, + Err(_) => continue, + }; + + let _ = ready.send(messages_encode(next_message)); + } +} diff --git a/client/network/src/gossip/tests.rs b/client/network/src/gossip/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..9ba44f564e1323328bafc68d5a8163d8c1aae2f9 --- /dev/null +++ b/client/network/src/gossip/tests.rs @@ -0,0 +1,201 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::{config, gossip::QueuedSender, Event, NetworkService, NetworkWorker}; + +use futures::prelude::*; +use sp_runtime::traits::{Block as BlockT, Header as _}; +use std::{sync::Arc, time::Duration}; +use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; + +type TestNetworkService = NetworkService< + substrate_test_runtime_client::runtime::Block, + substrate_test_runtime_client::runtime::Hash, +>; + +/// Builds a full node to be used for testing. Returns the node service and its associated events +/// stream. +/// +/// > **Note**: We return the events stream in order to not possibly lose events between the +/// > construction of the service and the moment the events stream is grabbed. 
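The note above is the reason `build_test_full_node` hands back the events stream it subscribed to internally instead of letting the caller subscribe later; the `basic_works` test below relies on this to wait for `NotificationStreamOpened` before queueing anything. A condensed sketch of that consumption pattern (names as in the test):

    let (node, mut events) = build_test_full_node(config);
    // `events` was grabbed inside `build_test_full_node`, right after the service
    // was built, so events emitted before this loop starts are not lost.
    loop {
        match events.next().await.unwrap() {
            Event::NotificationStreamOpened { .. } => break,
            _ => {}
        }
    }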
+fn build_test_full_node(config: config::NetworkConfiguration) + -> (Arc, impl Stream) +{ + let client = Arc::new( + TestClientBuilder::with_default_backend() + .build_with_longest_chain() + .0, + ); + + #[derive(Clone)] + struct PassThroughVerifier(bool); + impl sp_consensus::import_queue::Verifier for PassThroughVerifier { + fn verify( + &mut self, + origin: sp_consensus::BlockOrigin, + header: B::Header, + justification: Option, + body: Option>, + ) -> Result< + ( + sp_consensus::BlockImportParams, + Option)>>, + ), + String, + > { + let maybe_keys = header + .digest() + .log(|l| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) + .or_else(|| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"babe")) + }) + }) + .map(|blob| { + vec![( + sp_blockchain::well_known_cache_keys::AUTHORITIES, + blob.to_vec(), + )] + }); + + let mut import = sp_consensus::BlockImportParams::new(origin, header); + import.body = body; + import.finalized = self.0; + import.justification = justification; + import.fork_choice = Some(sp_consensus::ForkChoiceStrategy::LongestChain); + Ok((import, maybe_keys)) + } + } + + let import_queue = Box::new(sp_consensus::import_queue::BasicQueue::new( + PassThroughVerifier(false), + Box::new(client.clone()), + None, + None, + &sp_core::testing::TaskExecutor::new(), + None, + )); + + let worker = NetworkWorker::new(config::Params { + role: config::Role::Full, + executor: None, + network_config: config, + chain: client.clone(), + finality_proof_provider: None, + finality_proof_request_builder: None, + on_demand: None, + transaction_pool: Arc::new(crate::config::EmptyTransactionPool), + protocol_id: config::ProtocolId::from("/test-protocol-name"), + import_queue, + block_announce_validator: Box::new( + sp_consensus::block_validation::DefaultBlockAnnounceValidator, + ), + metrics_registry: None, + }) + .unwrap(); + + let service = worker.service().clone(); + let event_stream = service.event_stream("test"); + + async_std::task::spawn(async move { + futures::pin_mut!(worker); + let _ = worker.await; + }); + + (service, event_stream) +} + +const ENGINE_ID: sp_runtime::ConsensusEngineId = *b"foo\0"; + +/// Builds two nodes and their associated events stream. +/// The nodes are connected together and have the `ENGINE_ID` protocol registered. +fn build_nodes_one_proto() + -> (Arc, impl Stream, Arc, impl Stream) +{ + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + + let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { + notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + .. config::NetworkConfiguration::new_local() + }); + + let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { + notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + listen_addresses: vec![], + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id().clone(), + }], + transport: config::TransportConfig::MemoryOnly, + .. 
config::NetworkConfiguration::new_local() + }); + + (node1, events_stream1, node2, events_stream2) +} + +#[test] +fn basic_works() { + const NUM_NOTIFS: usize = 256; + + let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); + let node2_id = node2.local_peer_id().clone(); + + let receiver = async_std::task::spawn(async move { + let mut received_notifications = 0; + + while received_notifications < NUM_NOTIFS { + match events_stream2.next().await.unwrap() { + Event::NotificationStreamClosed { .. } => panic!(), + Event::NotificationsReceived { messages, .. } => { + for message in messages { + assert_eq!(message.0, ENGINE_ID); + assert_eq!(message.1, &b"message"[..]); + received_notifications += 1; + } + } + _ => {} + }; + + if rand::random::() < 2 { + async_std::task::sleep(Duration::from_millis(rand::random::() % 750)).await; + } + } + }); + + async_std::task::block_on(async move { + let (sender, bg_future) = + QueuedSender::new(node1, node2_id, ENGINE_ID, NUM_NOTIFS, |msg| msg); + async_std::task::spawn(bg_future); + + // Wait for the `NotificationStreamOpened`. + loop { + match events_stream1.next().await.unwrap() { + Event::NotificationStreamOpened { .. } => break, + _ => {} + }; + } + + for _ in 0..NUM_NOTIFS { + sender.queue_or_discard(b"message".to_vec()).await; + } + + receiver.await; + }); +} diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 69635fce11f7c2b83cdb98d84bc44d3dde523d72..3fd01c33dcf5f0d0dc8dd0846bc9700483ab6fa5 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -15,6 +15,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . + #![warn(unused_extern_crates)] #![warn(missing_docs)] @@ -252,6 +253,7 @@ mod finality_requests; mod light_client_handler; mod on_demand_layer; mod protocol; +mod request_responses; mod schema; mod service; mod transport; @@ -259,15 +261,16 @@ mod utils; pub mod config; pub mod error; +pub mod gossip; pub mod network_state; -pub use service::{NetworkService, NetworkWorker}; -pub use protocol::PeerInfo; -pub use protocol::event::{Event, DhtEvent, ObservedRole}; -pub use protocol::sync::SyncState; -pub use libp2p::{Multiaddr, PeerId}; #[doc(inline)] -pub use libp2p::multiaddr; +pub use libp2p::{multiaddr, Multiaddr, PeerId}; +pub use protocol::{event::{DhtEvent, Event, ObservedRole}, sync::SyncState, PeerInfo}; +pub use service::{ + NetworkService, NetworkWorker, RequestFailure, OutboundFailure, NotificationSender, + NotificationSenderReady, +}; pub use sc_peerset::ReputationChange; use sp_runtime::traits::{Block as BlockT, NumberFor}; diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index 678a717a898ff47b45174c3807a6d7d6b12a508c..c1ff14fc82a22e77696c6c66fe6436e83b2c6cd0 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -156,13 +156,13 @@ impl Config { pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { let mut vl = Vec::new(); vl.extend_from_slice(b"/"); - vl.extend_from_slice(id.as_bytes()); + vl.extend_from_slice(id.as_ref().as_bytes()); vl.extend_from_slice(b"/light/2"); self.light_protocol = vl.into(); let mut vb = Vec::new(); vb.extend_from_slice(b"/"); - vb.extend_from_slice(id.as_bytes()); + vb.extend_from_slice(id.as_ref().as_bytes()); vb.extend_from_slice(b"/sync/2"); self.block_protocol = vb.into(); @@ -758,7 +758,7 @@ where }; let mut cfg = OneShotHandlerConfig::default(); 
cfg.keep_alive_timeout = self.config.inactivity_timeout; - OneShotHandler::new(SubstreamProtocol::new(p), cfg) + OneShotHandler::new(SubstreamProtocol::new(p, ()), cfg) } fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec { @@ -1447,7 +1447,7 @@ mod tests { } fn make_config() -> super::Config { - super::Config::new(&ProtocolId::from(&b"foo"[..])) + super::Config::new(&ProtocolId::from("foo")) } fn dummy_header() -> sp_test_primitives::Header { @@ -2004,7 +2004,7 @@ mod tests { #[test] fn send_receive_header() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let chan = oneshot::channel(); let request = light::RemoteHeaderRequest { cht_root: Default::default(), diff --git a/client/network/src/network_state.rs b/client/network/src/network_state.rs index 2e24e9c5a9f58e9ff3fc9d8816f52481b7c3d30e..db2b6429304bb2b7d351725b907f361fa0ed9ef6 100644 --- a/client/network/src/network_state.rs +++ b/client/network/src/network_state.rs @@ -43,10 +43,6 @@ pub struct NetworkState { pub connected_peers: HashMap, /// List of node that we know of but that we're not connected to. pub not_connected_peers: HashMap, - /// The total number of bytes received. - pub total_bytes_inbound: u64, - /// The total number of bytes sent. - pub total_bytes_outbound: u64, /// State of the peerset manager. pub peerset: serde_json::Value, } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index c1c9ef02ea60be60689fbe6827b9c962f70f9ab3..c1887ce35bfdb98fc738588e49d854fba8e88f70 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -18,7 +18,7 @@ use crate::{ ExHashT, - chain::{Client, FinalityProofProvider}, + chain::Client, config::{BoxFinalityProofRequestBuilder, ProtocolId, TransactionPool, TransactionImportFuture, TransactionImport}, error, utils::{interval, LruHashSet}, @@ -31,35 +31,30 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, connection::{ConnectionId, ListenerId}}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::{ - storage::{StorageKey, PrefixedStorageKey, ChildInfo, ChildType}, - hexdisplay::HexDisplay -}; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, import_queue::{BlockImportResult, BlockImportError, IncomingBlock, Origin} }; -use codec::{Decode, Encode}; +use codec::{Decode, DecodeAll, Encode}; use sp_runtime::{generic::BlockId, ConsensusEngineId, Justification}; use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, One, Zero, CheckedSub + Block as BlockT, Header as HeaderT, NumberFor, Zero, CheckedSub }; use sp_arithmetic::traits::SaturatedConversion; use message::{BlockAnnounce, Message}; use message::generic::{Message as GenericMessage, Roles}; use prometheus_endpoint::{ - Registry, Gauge, Counter, CounterVec, GaugeVec, + Registry, Gauge, Counter, GaugeVec, PrometheusError, Opts, register, U64 }; use sync::{ChainSync, SyncState}; use std::borrow::Cow; -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque, hash_map::Entry}; +use std::collections::{HashMap, HashSet, VecDeque, hash_map::Entry}; use std::sync::Arc; use std::fmt::Write; -use std::{cmp, io, num::NonZeroUsize, pin::Pin, task::Poll, time}; +use std::{io, iter, num::NonZeroUsize, pin::Pin, task::Poll, time}; use log::{log, Level, trace, debug, warn, error}; -use sc_client_api::{ChangesProof, StorageProof}; use wasm_timer::Instant; mod generic_proto; @@ -91,11 +86,6 @@ pub(crate) const 
CURRENT_VERSION: u32 = 6; /// Lowest version we support pub(crate) const MIN_VERSION: u32 = 3; -// Maximum allowed entries in `BlockResponse` -const MAX_BLOCK_DATA_RESPONSE: u32 = 128; -// Maximum total bytes allowed for block bodies in `BlockResponse` -const MAX_BODIES_BYTES: usize = 8 * 1024 * 1024; - /// When light node connects to the full node and the full node is behind light node /// for at least `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` blocks, we consider it not useful /// and disconnect to free connection slot. @@ -118,16 +108,12 @@ mod rep { pub const GOOD_TRANSACTION: Rep = Rep::new(1 << 7, "Good transaction"); /// Reputation change when a peer sends us a bad transaction. pub const BAD_TRANSACTION: Rep = Rep::new(-(1 << 12), "Bad transaction"); - /// We sent an RPC query to the given node, but it failed. - pub const RPC_FAILED: Rep = Rep::new(-(1 << 12), "Remote call failed"); /// We received a message that failed to decode. pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); /// We received an unexpected response. pub const UNEXPECTED_RESPONSE: Rep = Rep::new_fatal("Unexpected response packet"); /// We received an unexpected transaction packet. pub const UNEXPECTED_TRANSACTIONS: Rep = Rep::new_fatal("Unexpected transactions packet"); - /// We received an unexpected light node request. - pub const UNEXPECTED_REQUEST: Rep = Rep::new_fatal("Unexpected block request packet"); /// Peer has different genesis. pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); /// Peer is on unsupported protocol version. @@ -146,7 +132,6 @@ struct Metrics { finality_proofs: GaugeVec, justifications: GaugeVec, propagated_transactions: Counter, - legacy_requests_received: CounterVec, } impl Metrics { @@ -192,13 +177,6 @@ impl Metrics { "sync_propagated_transactions", "Number of transactions propagated to at least one peer", )?, r)?, - legacy_requests_received: register(CounterVec::new( - Opts::new( - "sync_legacy_requests_received", - "Number of block/finality/light-client requests received on the legacy substream", - ), - &["kind"] - )?, r)?, }) } } @@ -249,18 +227,16 @@ pub struct Protocol { /// Used to report reputation changes. peerset_handle: sc_peerset::PeersetHandle, transaction_pool: Arc>, - /// When asked for a proof of finality, we use this struct to build one. - finality_proof_provider: Option>>, /// Handles opening the unique substream and sending and receiving raw messages. behaviour: GenericProto, /// For each legacy gossiping engine ID, the corresponding new protocol name. - protocol_name_by_engine: HashMap>, + protocol_name_by_engine: HashMap>, /// For each protocol name, the legacy equivalent. - legacy_equiv_by_name: HashMap, Fallback>, + legacy_equiv_by_name: HashMap, Fallback>, /// Name of the protocol used for transactions. - transactions_protocol: Cow<'static, [u8]>, + transactions_protocol: Cow<'static, str>, /// Name of the protocol used for block announces. - block_announces_protocol: Cow<'static, [u8]>, + block_announces_protocol: Cow<'static, str>, /// Prometheus metrics. metrics: Option, /// The `PeerId`'s of all boot nodes. 
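Notification protocol names are now `Cow<'static, str>` rather than `Cow<'static, [u8]>`, and they are still derived from the `ProtocolId`. A condensed, `format!`-based equivalent of the `push_str` construction added in the next hunks, using an illustrative `"dot"` protocol id:

    let protocol_id = ProtocolId::from("dot");

    // "/dot/transactions/1"
    let transactions_protocol: Cow<'static, str> =
        Cow::from(format!("/{}/transactions/1", protocol_id.as_ref()));

    // "/dot/block-announces/1"
    let block_announces_protocol: Cow<'static, str> =
        Cow::from(format!("/{}/block-announces/1", protocol_id.as_ref()));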
@@ -295,8 +271,6 @@ struct Peer { pub struct PeerInfo { /// Roles pub roles: Roles, - /// Protocol version - pub protocol_version: u32, /// Peer best block hash pub best_hash: B::Hash, /// Peer best block number @@ -388,7 +362,6 @@ impl Protocol { local_peer_id: PeerId, chain: Arc>, transaction_pool: Arc>, - finality_proof_provider: Option>>, finality_proof_request_builder: Option>, protocol_id: ProtocolId, peerset_config: sc_peerset::PeersetConfig, @@ -416,38 +389,43 @@ impl Protocol { }; let (peerset, peerset_handle) = sc_peerset::Peerset::from_config(peerset_config); - let versions = &((MIN_VERSION as u8)..=(CURRENT_VERSION as u8)).collect::>(); - let mut behaviour = GenericProto::new( - local_peer_id, - protocol_id.clone(), - versions, - build_status_message(&config, &chain), - peerset, - ); let mut legacy_equiv_by_name = HashMap::new(); - let transactions_protocol: Cow<'static, [u8]> = Cow::from({ - let mut proto = b"/".to_vec(); - proto.extend(protocol_id.as_bytes()); - proto.extend(b"/transactions/1"); + let transactions_protocol: Cow<'static, str> = Cow::from({ + let mut proto = String::new(); + proto.push_str("/"); + proto.push_str(protocol_id.as_ref()); + proto.push_str("/transactions/1"); proto }); - behaviour.register_notif_protocol(transactions_protocol.clone(), Vec::new()); legacy_equiv_by_name.insert(transactions_protocol.clone(), Fallback::Transactions); - let block_announces_protocol: Cow<'static, [u8]> = Cow::from({ - let mut proto = b"/".to_vec(); - proto.extend(protocol_id.as_bytes()); - proto.extend(b"/block-announces/1"); + let block_announces_protocol: Cow<'static, str> = Cow::from({ + let mut proto = String::new(); + proto.push_str("/"); + proto.push_str(protocol_id.as_ref()); + proto.push_str("/block-announces/1"); proto }); - behaviour.register_notif_protocol( - block_announces_protocol.clone(), - BlockAnnouncesHandshake::build(&config, &chain).encode() - ); legacy_equiv_by_name.insert(block_announces_protocol.clone(), Fallback::BlockAnnounce); + let behaviour = { + let versions = &((MIN_VERSION as u8)..=(CURRENT_VERSION as u8)).collect::>(); + let block_announces_handshake = BlockAnnouncesHandshake::build(&config, &chain).encode(); + GenericProto::new( + local_peer_id, + protocol_id.clone(), + versions, + build_status_message(&config, &chain), + peerset, + // As documented in `GenericProto`, the first protocol in the list is always the + // one carrying the handshake reported in the `CustomProtocolOpen` event. 
+ iter::once((block_announces_protocol.clone(), block_announces_handshake)) + .chain(iter::once((transactions_protocol.clone(), vec![]))), + ) + }; + let protocol = Protocol { tick_timeout: Box::pin(interval(TICK_TIMEOUT)), propagate_timeout: Box::pin(interval(PROPAGATE_TIMEOUT)), @@ -464,7 +442,6 @@ impl Protocol { sync, important_peers, transaction_pool, - finality_proof_provider, peerset_handle: peerset_handle.clone(), behaviour, protocol_name_by_engine: HashMap::new(), @@ -613,12 +590,6 @@ impl Protocol { match message { GenericMessage::Status(_) => debug!(target: "sub-libp2p", "Received unexpected Status"), - GenericMessage::BlockRequest(r) => self.on_block_request(who, r), - GenericMessage::BlockResponse(r) => { - let outcome = self.on_block_response(who.clone(), r); - self.update_peer_info(&who); - return outcome - }, GenericMessage::BlockAnnounce(announce) => { let outcome = self.on_block_announce(who.clone(), announce); self.update_peer_info(&who); @@ -626,27 +597,33 @@ impl Protocol { }, GenericMessage::Transactions(m) => self.on_transactions(who, m), - GenericMessage::RemoteCallRequest(request) => self.on_remote_call_request(who, request), + GenericMessage::BlockResponse(_) => + warn!(target: "sub-libp2p", "Received unexpected BlockResponse"), GenericMessage::RemoteCallResponse(_) => warn!(target: "sub-libp2p", "Received unexpected RemoteCallResponse"), - GenericMessage::RemoteReadRequest(request) => - self.on_remote_read_request(who, request), GenericMessage::RemoteReadResponse(_) => warn!(target: "sub-libp2p", "Received unexpected RemoteReadResponse"), - GenericMessage::RemoteHeaderRequest(request) => - self.on_remote_header_request(who, request), GenericMessage::RemoteHeaderResponse(_) => warn!(target: "sub-libp2p", "Received unexpected RemoteHeaderResponse"), - GenericMessage::RemoteChangesRequest(request) => - self.on_remote_changes_request(who, request), GenericMessage::RemoteChangesResponse(_) => warn!(target: "sub-libp2p", "Received unexpected RemoteChangesResponse"), - GenericMessage::FinalityProofRequest(request) => - self.on_finality_proof_request(who, request), - GenericMessage::FinalityProofResponse(response) => - return self.on_finality_proof_response(who, response), - GenericMessage::RemoteReadChildRequest(request) => - self.on_remote_read_child_request(who, request), + GenericMessage::FinalityProofResponse(_) => + warn!(target: "sub-libp2p", "Received unexpected FinalityProofResponse"), + GenericMessage::BlockRequest(_) | + GenericMessage::FinalityProofRequest(_) | + GenericMessage::RemoteReadChildRequest(_) | + GenericMessage::RemoteCallRequest(_) | + GenericMessage::RemoteReadRequest(_) | + GenericMessage::RemoteHeaderRequest(_) | + GenericMessage::RemoteChangesRequest(_) => { + debug!( + target: "sub-libp2p", + "Received no longer supported legacy request from {:?}", + who + ); + self.disconnect_peer(&who); + self.peerset_handle.report_peer(who, rep::BAD_PROTOCOL); + }, GenericMessage::Consensus(msg) => return if self.protocol_name_by_engine.contains_key(&msg.engine_id) { CustomMessageOutcome::NotificationsReceived { @@ -654,7 +631,7 @@ impl Protocol { messages: vec![(msg.engine_id, From::from(msg.data))], } } else { - warn!(target: "sync", "Received message on non-registered protocol: {:?}", msg.engine_id); + debug!(target: "sync", "Received message on non-registered protocol: {:?}", msg.engine_id); CustomMessageOutcome::None }, GenericMessage::ConsensusBatch(messages) => { @@ -664,7 +641,7 @@ impl Protocol { if 
self.protocol_name_by_engine.contains_key(&msg.engine_id) { Some((msg.engine_id, From::from(msg.data))) } else { - warn!(target: "sync", "Received message on non-registered protocol: {:?}", msg.engine_id); + debug!(target: "sync", "Received message on non-registered protocol: {:?}", msg.engine_id); None } }) @@ -684,21 +661,6 @@ impl Protocol { CustomMessageOutcome::None } - fn send_message( - &mut self, - who: &PeerId, - message: Option<(Cow<'static, [u8]>, Vec)>, - legacy: Message, - ) { - send_message::( - &mut self.behaviour, - &mut self.context_data.stats, - who, - message, - legacy, - ); - } - fn update_peer_request(&mut self, who: &PeerId, request: &mut message::BlockRequest) { update_peer_request::(&mut self.context_data.peers, who, request) } @@ -724,92 +686,6 @@ impl Protocol { } } - fn on_block_request(&mut self, peer: PeerId, request: message::BlockRequest) { - if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.with_label_values(&["block-request"]).inc(); - } - - trace!(target: "sync", "BlockRequest {} from {}: from {:?} to {:?} max {:?} for {:?}", - request.id, - peer, - request.from, - request.to, - request.max, - request.fields, - ); - - // sending block requests to the node that is unable to serve it is considered a bad behavior - if !self.config.roles.is_full() { - trace!(target: "sync", "Peer {} is trying to sync from the light node", peer); - self.behaviour.disconnect_peer(&peer); - self.peerset_handle.report_peer(peer, rep::UNEXPECTED_REQUEST); - return; - } - - let mut blocks = Vec::new(); - let mut id = match request.from { - message::FromBlock::Hash(h) => BlockId::Hash(h), - message::FromBlock::Number(n) => BlockId::Number(n), - }; - let max = cmp::min(request.max.unwrap_or(u32::max_value()), MAX_BLOCK_DATA_RESPONSE) as usize; - let get_header = request.fields.contains(message::BlockAttributes::HEADER); - let get_body = request.fields.contains(message::BlockAttributes::BODY); - let get_justification = request - .fields - .contains(message::BlockAttributes::JUSTIFICATION); - let mut total_size = 0; - while let Some(header) = self.context_data.chain.header(id).unwrap_or(None) { - if blocks.len() >= max || (blocks.len() >= 1 && total_size > MAX_BODIES_BYTES) { - break; - } - let number = *header.number(); - let hash = header.hash(); - let parent_hash = *header.parent_hash(); - let justification = if get_justification { - self.context_data.chain.justification(&BlockId::Hash(hash)).unwrap_or(None) - } else { - None - }; - let block_data = message::generic::BlockData { - hash, - header: if get_header { Some(header) } else { None }, - body: if get_body { - self.context_data - .chain - .block_body(&BlockId::Hash(hash)) - .unwrap_or(None) - } else { - None - }, - receipt: None, - message_queue: None, - justification, - }; - // Stop if we don't have requested block body - if get_body && block_data.body.is_none() { - trace!(target: "sync", "Missing data for block request."); - break; - } - total_size += block_data.body.as_ref().map_or(0, |b| b.len()); - blocks.push(block_data); - match request.direction { - message::Direction::Ascending => id = BlockId::Number(number + One::one()), - message::Direction::Descending => { - if number.is_zero() { - break; - } - id = BlockId::Hash(parent_hash) - } - } - } - let response = message::generic::BlockResponse { - id: request.id, - blocks, - }; - trace!(target: "sync", "Sending BlockResponse with {} blocks", response.blocks.len()); - self.send_message(&peer, None, GenericMessage::BlockResponse(response)) - } - /// 
Adjusts the reputation of a node. pub fn report_peer(&self, who: PeerId, reputation: sc_peerset::ReputationChange) { self.peerset_handle.report_peer(who, reputation) @@ -954,99 +830,86 @@ impl Protocol { } } - /// Called on receipt of a status message via the legacy protocol on the first connection between two peers. - pub fn on_peer_connected( + /// Called on the first connection between two peers, after their exchange of handshake. + fn on_peer_connected( &mut self, who: PeerId, - status: message::Status, + status: BlockAnnouncesHandshake, notifications_sink: NotificationsSink, ) -> CustomMessageOutcome { trace!(target: "sync", "New peer {} {:?}", who, status); - let _protocol_version = { - if self.context_data.peers.contains_key(&who) { - debug!(target: "sync", "Ignoring duplicate status packet from {}", who); - return CustomMessageOutcome::None; - } - if status.genesis_hash != self.genesis_hash { - log!( - target: "sync", - if self.important_peers.contains(&who) { Level::Warn } else { Level::Trace }, - "Peer is on different chain (our genesis: {} theirs: {})", - self.genesis_hash, status.genesis_hash - ); - self.peerset_handle.report_peer(who.clone(), rep::GENESIS_MISMATCH); - self.behaviour.disconnect_peer(&who); - if self.boot_node_ids.contains(&who) { - error!( - target: "sync", - "Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})", - who, - self.genesis_hash, - status.genesis_hash, - ); - } + if self.context_data.peers.contains_key(&who) { + debug!(target: "sync", "Ignoring duplicate status packet from {}", who); + return CustomMessageOutcome::None; + } + if status.genesis_hash != self.genesis_hash { + log!( + target: "sync", + if self.important_peers.contains(&who) { Level::Warn } else { Level::Trace }, + "Peer is on different chain (our genesis: {} theirs: {})", + self.genesis_hash, status.genesis_hash + ); + self.peerset_handle.report_peer(who.clone(), rep::GENESIS_MISMATCH); + self.behaviour.disconnect_peer(&who); - return CustomMessageOutcome::None; - } - if status.version < MIN_VERSION && CURRENT_VERSION < status.min_supported_version { - log!( + if self.boot_node_ids.contains(&who) { + error!( target: "sync", - if self.important_peers.contains(&who) { Level::Warn } else { Level::Trace }, - "Peer {:?} using unsupported protocol version {}", who, status.version + "Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})", + who, + self.genesis_hash, + status.genesis_hash, ); - self.peerset_handle.report_peer(who.clone(), rep::BAD_PROTOCOL); - self.behaviour.disconnect_peer(&who); - return CustomMessageOutcome::None; } - if self.config.roles.is_light() { - // we're not interested in light peers - if status.roles.is_light() { - debug!(target: "sync", "Peer {} is unable to serve light requests", who); - self.peerset_handle.report_peer(who.clone(), rep::BAD_ROLE); - self.behaviour.disconnect_peer(&who); - return CustomMessageOutcome::None; - } + return CustomMessageOutcome::None; + } - // we don't interested in peers that are far behind us - let self_best_block = self - .context_data - .chain - .info() - .best_number; - let blocks_difference = self_best_block - .checked_sub(&status.best_number) - .unwrap_or_else(Zero::zero) - .saturated_into::(); - if blocks_difference > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE { - debug!(target: "sync", "Peer {} is far behind us and will unable to serve light requests", who); - self.peerset_handle.report_peer(who.clone(), rep::PEER_BEHIND_US_LIGHT); - self.behaviour.disconnect_peer(&who); - return 
CustomMessageOutcome::None; - } + if self.config.roles.is_light() { + // we're not interested in light peers + if status.roles.is_light() { + debug!(target: "sync", "Peer {} is unable to serve light requests", who); + self.peerset_handle.report_peer(who.clone(), rep::BAD_ROLE); + self.behaviour.disconnect_peer(&who); + return CustomMessageOutcome::None; } - let peer = Peer { - info: PeerInfo { - protocol_version: status.version, - roles: status.roles, - best_hash: status.best_hash, - best_number: status.best_number - }, - block_request: None, - known_transactions: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS) - .expect("Constant is nonzero")), - known_blocks: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_BLOCKS) - .expect("Constant is nonzero")), - next_request_id: 0, - obsolete_requests: HashMap::new(), - }; - self.context_data.peers.insert(who.clone(), peer); + // we don't interested in peers that are far behind us + let self_best_block = self + .context_data + .chain + .info() + .best_number; + let blocks_difference = self_best_block + .checked_sub(&status.best_number) + .unwrap_or_else(Zero::zero) + .saturated_into::(); + if blocks_difference > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE { + debug!(target: "sync", "Peer {} is far behind us and will unable to serve light requests", who); + self.peerset_handle.report_peer(who.clone(), rep::PEER_BEHIND_US_LIGHT); + self.behaviour.disconnect_peer(&who); + return CustomMessageOutcome::None; + } + } - debug!(target: "sync", "Connected {}", who); - status.version + let peer = Peer { + info: PeerInfo { + roles: status.roles, + best_hash: status.best_hash, + best_number: status.best_number + }, + block_request: None, + known_transactions: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS) + .expect("Constant is nonzero")), + known_blocks: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_BLOCKS) + .expect("Constant is nonzero")), + next_request_id: 0, + obsolete_requests: HashMap::new(), }; + self.context_data.peers.insert(who.clone(), peer); + + debug!(target: "sync", "Connected {}", who); let info = self.context_data.peers.get(&who).expect("We just inserted above; QED").info.clone(); self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who.clone(), status.best_number)); @@ -1084,7 +947,7 @@ impl Protocol { pub fn register_notifications_protocol<'a>( &'a mut self, engine_id: ConsensusEngineId, - protocol_name: impl Into>, + protocol_name: impl Into>, handshake_message: Vec, ) -> impl Iterator + 'a { let protocol_name = protocol_name.into(); @@ -1213,14 +1076,11 @@ impl Protocol { .push(who.to_base58()); } trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); - let encoded = to_send.encode(); - send_message:: ( - &mut self.behaviour, - &mut self.context_data.stats, - &who, - Some((self.transactions_protocol.clone(), encoded)), - GenericMessage::Transactions(to_send) - ) + self.behaviour.write_notification( + who, + self.transactions_protocol.clone(), + to_send.encode() + ); } } @@ -1279,31 +1139,19 @@ impl Protocol { if inserted || force { let message = message::BlockAnnounce { header: header.clone(), - state: if peer.info.protocol_version >= 4 { - if is_best { - Some(message::BlockState::Best) - } else { - Some(message::BlockState::Normal) - } - } else { - None - }, - data: if peer.info.protocol_version >= 4 { - Some(data.clone()) + state: if is_best { + Some(message::BlockState::Best) } else { - None + Some(message::BlockState::Normal) }, + data: Some(data.clone()), }; - let encoded = message.encode(); - - 
send_message:: ( - &mut self.behaviour, - &mut self.context_data.stats, - &who, - Some((self.block_announces_protocol.clone(), encoded)), - Message::::BlockAnnounce(message), - ) + self.behaviour.write_notification( + who, + self.block_announces_protocol.clone(), + message.encode() + ); } } } @@ -1391,51 +1239,6 @@ impl Protocol { self.sync.on_block_finalized(&hash, *header.number()) } - fn on_remote_call_request( - &mut self, - who: PeerId, - request: message::RemoteCallRequest, - ) { - trace!(target: "sync", "Remote call request {} from {} ({} at {})", - request.id, - who, - request.method, - request.block - ); - - if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.with_label_values(&["remote-call"]).inc(); - } - - let proof = match self.context_data.chain.execution_proof( - &BlockId::Hash(request.block), - &request.method, - &request.data, - ) { - Ok((_, proof)) => proof, - Err(error) => { - trace!(target: "sync", "Remote call request {} from {} ({} at {}) failed with: {}", - request.id, - who, - request.method, - request.block, - error - ); - self.peerset_handle.report_peer(who.clone(), rep::RPC_FAILED); - StorageProof::empty() - } - }; - - self.send_message( - &who, - None, - GenericMessage::RemoteCallResponse(message::RemoteCallResponse { - id: request.id, - proof, - }), - ); - } - /// Request a justification for the given block. /// /// Uses `protocol` to queue a new justification request and tries to dispatch all pending @@ -1522,257 +1325,6 @@ impl Protocol { self.sync.on_finality_proof_import(request_block, finalization_result) } - fn on_remote_read_request( - &mut self, - who: PeerId, - request: message::RemoteReadRequest, - ) { - if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.with_label_values(&["remote-read"]).inc(); - } - - if request.keys.is_empty() { - debug!(target: "sync", "Invalid remote read request sent by {}", who); - self.behaviour.disconnect_peer(&who); - self.peerset_handle.report_peer(who, rep::BAD_MESSAGE); - return; - } - - let keys_str = || match request.keys.len() { - 1 => HexDisplay::from(&request.keys[0]).to_string(), - _ => format!( - "{}..{}", - HexDisplay::from(&request.keys[0]), - HexDisplay::from(&request.keys[request.keys.len() - 1]), - ), - }; - - trace!(target: "sync", "Remote read request {} from {} ({} at {})", - request.id, who, keys_str(), request.block); - let proof = match self.context_data.chain.read_proof( - &BlockId::Hash(request.block), - &mut request.keys.iter().map(AsRef::as_ref) - ) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote read request {} from {} ({} at {}) failed with: {}", - request.id, - who, - keys_str(), - request.block, - error - ); - StorageProof::empty() - } - }; - self.send_message( - &who, - None, - GenericMessage::RemoteReadResponse(message::RemoteReadResponse { - id: request.id, - proof, - }), - ); - } - - fn on_remote_read_child_request( - &mut self, - who: PeerId, - request: message::RemoteReadChildRequest, - ) { - if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.with_label_values(&["remote-child"]).inc(); - } - - if request.keys.is_empty() { - debug!(target: "sync", "Invalid remote child read request sent by {}", who); - self.behaviour.disconnect_peer(&who); - self.peerset_handle.report_peer(who, rep::BAD_MESSAGE); - return; - } - - let keys_str = || match request.keys.len() { - 1 => HexDisplay::from(&request.keys[0]).to_string(), - _ => format!( - "{}..{}", - HexDisplay::from(&request.keys[0]), - 
HexDisplay::from(&request.keys[request.keys.len() - 1]), - ), - }; - - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", - request.id, who, HexDisplay::from(&request.storage_key), keys_str(), request.block); - let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); - let child_info = match ChildType::from_prefixed_key(prefixed_key) { - Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), - None => Err("Invalid child storage key".into()), - }; - let proof = match child_info.and_then(|child_info| self.context_data.chain.read_child_proof( - &BlockId::Hash(request.block), - &child_info, - &mut request.keys.iter().map(AsRef::as_ref), - )) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", - request.id, - who, - HexDisplay::from(&request.storage_key), - keys_str(), - request.block, - error - ); - StorageProof::empty() - } - }; - self.send_message( - &who, - None, - GenericMessage::RemoteReadResponse(message::RemoteReadResponse { - id: request.id, - proof, - }), - ); - } - - fn on_remote_header_request( - &mut self, - who: PeerId, - request: message::RemoteHeaderRequest>, - ) { - if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.with_label_values(&["remote-header"]).inc(); - } - - trace!(target: "sync", "Remote header proof request {} from {} ({})", - request.id, who, request.block); - let (header, proof) = match self.context_data.chain.header_proof(&BlockId::Number(request.block)) { - Ok((header, proof)) => (Some(header), proof), - Err(error) => { - trace!(target: "sync", "Remote header proof request {} from {} ({}) failed with: {}", - request.id, - who, - request.block, - error - ); - (Default::default(), StorageProof::empty()) - } - }; - self.send_message( - &who, - None, - GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { - id: request.id, - header, - proof, - }), - ); - } - - fn on_remote_changes_request( - &mut self, - who: PeerId, - request: message::RemoteChangesRequest, - ) { - if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.with_label_values(&["remote-changes"]).inc(); - } - - trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{})", - request.id, - who, - if let Some(sk) = request.storage_key.as_ref() { - format!("{} : {}", HexDisplay::from(sk), HexDisplay::from(&request.key)) - } else { - HexDisplay::from(&request.key).to_string() - }, - request.first, - request.last - ); - let key = StorageKey(request.key); - let prefixed_key = request.storage_key.as_ref() - .map(|storage_key| PrefixedStorageKey::new_ref(storage_key)); - let (first, last, min, max) = (request.first, request.last, request.min, request.max); - let proof = match self.context_data.chain.key_changes_proof( - first, - last, - min, - max, - prefixed_key, - &key, - ) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{}) failed with: {}", - request.id, - who, - if let Some(sk) = request.storage_key.as_ref() { - format!("{} : {}", HexDisplay::from(sk), HexDisplay::from(&key.0)) - } else { - HexDisplay::from(&key.0).to_string() - }, - request.first, - request.last, - error - ); - ChangesProof:: { - max_block: Zero::zero(), - proof: vec![], - roots: BTreeMap::new(), - roots_proof: StorageProof::empty(), - } - } - }; - self.send_message( - &who, - None, - 
GenericMessage::RemoteChangesResponse(message::RemoteChangesResponse { - id: request.id, - max: proof.max_block, - proof: proof.proof, - roots: proof.roots.into_iter().collect(), - roots_proof: proof.roots_proof, - }), - ); - } - - fn on_finality_proof_request( - &mut self, - who: PeerId, - request: message::FinalityProofRequest, - ) { - if let Some(metrics) = &self.metrics { - metrics.legacy_requests_received.with_label_values(&["finality-proof"]).inc(); - } - - trace!(target: "sync", "Finality proof request from {} for {}", who, request.block); - let finality_proof = self.finality_proof_provider.as_ref() - .ok_or_else(|| String::from("Finality provider is not configured")) - .and_then(|provider| - provider.prove_finality(request.block, &request.request).map_err(|e| e.to_string()) - ); - let finality_proof = match finality_proof { - Ok(finality_proof) => finality_proof, - Err(error) => { - trace!(target: "sync", "Finality proof request from {} for {} failed with: {}", - who, - request.block, - error - ); - None - }, - }; - self.send_message( - &who, - None, - GenericMessage::FinalityProofResponse(message::FinalityProofResponse { - id: 0, - block: request.block, - proof: finality_proof, - }), - ); - } - /// Must be called after a [`CustomMessageOutcome::FinalityProofRequest`] has been emitted, /// to notify of the response having arrived. pub fn on_finality_proof_response( @@ -1907,24 +1459,6 @@ fn update_peer_request( } } -fn send_message( - behaviour: &mut GenericProto, - stats: &mut HashMap<&'static str, PacketStats>, - who: &PeerId, - message: Option<(Cow<'static, [u8]>, Vec)>, - legacy_message: Message, -) { - let encoded = legacy_message.encode(); - let mut stats = stats.entry(legacy_message.id()).or_default(); - stats.bytes_out += encoded.len() as u64; - stats.count_out += 1; - if let Some((proto, msg)) = message { - behaviour.write_notification(who, proto, msg, encoded); - } else { - behaviour.send_packet(who, encoded); - } -} - impl NetworkBehaviour for Protocol { type ProtocolsHandler = ::ProtocolsHandler; type OutEvent = CustomMessageOutcome; @@ -2034,9 +1568,20 @@ impl NetworkBehaviour for Protocol { let outcome = match event { GenericProtoOut::CustomProtocolOpen { peer_id, received_handshake, notifications_sink, .. } => { - match as Decode>::decode(&mut &received_handshake[..]) { - Ok(GenericMessage::Status(handshake)) => - self.on_peer_connected(peer_id, handshake, notifications_sink), + // `received_handshake` can be either a `Status` message if received from the + // legacy substream ,or a `BlockAnnouncesHandshake` if received from the block + // announces substream. 
+ match as DecodeAll>::decode_all(&mut &received_handshake[..]) { + Ok(GenericMessage::Status(handshake)) => { + let handshake = BlockAnnouncesHandshake { + roles: handshake.roles, + best_number: handshake.best_number, + best_hash: handshake.best_hash, + genesis_hash: handshake.genesis_hash, + }; + + self.on_peer_connected(peer_id, handshake, notifications_sink) + }, Ok(msg) => { debug!( target: "sync", @@ -2048,15 +1593,23 @@ impl NetworkBehaviour for Protocol { CustomMessageOutcome::None } Err(err) => { - debug!( - target: "sync", - "Couldn't decode handshake sent by {}: {:?}: {}", - peer_id, - received_handshake, - err.what() - ); - self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); - CustomMessageOutcome::None + match as DecodeAll>::decode_all(&mut &received_handshake[..]) { + Ok(handshake) => { + self.on_peer_connected(peer_id, handshake, notifications_sink) + } + Err(err2) => { + debug!( + target: "sync", + "Couldn't decode handshake sent by {}: {:?}: {} & {}", + peer_id, + received_handshake, + err.what(), + err2, + ); + self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + CustomMessageOutcome::None + } + } } } } @@ -2099,7 +1652,7 @@ impl NetworkBehaviour for Protocol { } } None => { - error!(target: "sub-libp2p", "Received notification from unknown protocol {:?}", protocol_name); + debug!(target: "sub-libp2p", "Received notification from unknown protocol {:?}", protocol_name); CustomMessageOutcome::None } } diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index f965980640ad67e0c85b30ee26b502055760b3f9..e7e2cb035d65c25447869117f7face34ea80c896 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -120,7 +120,7 @@ pub struct GenericProto { /// Notification protocols. Entries are only ever added and not removed. /// Contains, for each protocol, the protocol name and the message to send as part of the /// initial handshake. - notif_protocols: Vec<(Cow<'static, [u8]>, Arc>>)>, + notif_protocols: Vec<(Cow<'static, str>, Arc>>)>, /// Receiver for instructions about who to connect to or disconnect from. peerset: sc_peerset::Peerset, @@ -322,7 +322,7 @@ pub enum GenericProtoOut { /// Id of the peer the message came from. peer_id: PeerId, /// Engine corresponding to the message. - protocol_name: Cow<'static, [u8]>, + protocol_name: Cow<'static, str>, /// Message that has been received. message: BytesMut, }, @@ -336,14 +336,21 @@ impl GenericProto { versions: &[u8], handshake_message: Vec, peerset: sc_peerset::Peerset, + notif_protocols: impl Iterator, Vec)>, ) -> Self { + let notif_protocols = notif_protocols + .map(|(n, hs)| (n, Arc::new(RwLock::new(hs)))) + .collect::>(); + + assert!(!notif_protocols.is_empty()); + let legacy_handshake_message = Arc::new(RwLock::new(handshake_message)); let legacy_protocol = RegisteredProtocol::new(protocol, versions, legacy_handshake_message); GenericProto { local_peer_id, legacy_protocol, - notif_protocols: Vec::new(), + notif_protocols, peerset, peers: FnvHashMap::default(), delays: Default::default(), @@ -360,7 +367,7 @@ impl GenericProto { /// will retain the protocols that were registered then, and not any new one. 
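Because protocol names are plain strings now, later registration and handshake updates take `str`-based arguments directly. A hedged sketch of the two calls on an existing `GenericProto` value (the `behaviour` binding, protocol name and handshake payloads are illustrative, subject to the caveat stated in the doc comment above):

    // Register an additional notifications protocol on an existing behaviour...
    behaviour.register_notif_protocol("/dot/my-extra-protocol/1", b"initial handshake".to_vec());
    // ...and later replace the handshake sent to newly connected peers.
    behaviour.set_notif_protocol_handshake("/dot/my-extra-protocol/1", b"updated handshake".to_vec());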
pub fn register_notif_protocol( &mut self, - protocol_name: impl Into>, + protocol_name: impl Into>, handshake_msg: impl Into> ) { self.notif_protocols.push((protocol_name.into(), Arc::new(RwLock::new(handshake_msg.into())))); @@ -371,10 +378,10 @@ impl GenericProto { /// Has no effect if the protocol is unknown. pub fn set_notif_protocol_handshake( &mut self, - protocol_name: &[u8], + protocol_name: &str, handshake_message: impl Into> ) { - if let Some(protocol) = self.notif_protocols.iter_mut().find(|(name, _)| name == &protocol_name) { + if let Some(protocol) = self.notif_protocols.iter_mut().find(|(name, _)| name == protocol_name) { *protocol.1.write() = handshake_message.into(); } } @@ -551,9 +558,8 @@ impl GenericProto { pub fn write_notification( &mut self, target: &PeerId, - protocol_name: Cow<'static, [u8]>, + protocol_name: Cow<'static, str>, message: impl Into>, - encoded_fallback_message: Vec, ) { let notifs_sink = match self.peers.get(target).and_then(|p| p.get_open()) { None => { @@ -569,38 +575,15 @@ impl GenericProto { target: "sub-libp2p", "External API => Notification({:?}, {:?})", target, - str::from_utf8(&protocol_name) + protocol_name, ); trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); notifs_sink.send_sync_notification( - &protocol_name, - encoded_fallback_message, + protocol_name, message ); } - /// Sends a message to a peer. - /// - /// Has no effect if the custom protocol is not open with the given peer. - /// - /// Also note that even we have a valid open substream, it may in fact be already closed - /// without us knowing, in which case the packet will not be received. - pub fn send_packet(&mut self, target: &PeerId, message: Vec) { - let notifs_sink = match self.peers.get(target).and_then(|p| p.get_open()) { - None => { - debug!(target: "sub-libp2p", - "Tried to sent packet to {:?} without an open channel.", - target); - return - } - Some(sink) => sink - }; - - trace!(target: "sub-libp2p", "External API => Packet for {:?}", target); - trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); - notifs_sink.send_legacy(message); - } - /// Returns the state of the peerset manager, for debugging purposes. pub fn peerset_debug_info(&mut self) -> serde_json::Value { self.peerset.debug_info() @@ -1374,7 +1357,7 @@ impl NetworkBehaviour for GenericProto { target: "sub-libp2p", "Handler({:?}) => Notification({:?})", source, - str::from_utf8(&protocol_name) + protocol_name, ); trace!(target: "sub-libp2p", "External API <= Message({:?}, {:?})", protocol_name, source); let event = GenericProtoOut::Notification { diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index dfa066936985e138940d12ba5ea288352c1fb0af..f355fba60fb04a7025d983e891a9b48f163649d9 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -107,9 +107,18 @@ pub struct NotifsHandler { /// Handlers for outbound substreams, and the initial handshake message we send. out_handlers: Vec<(NotifsOutHandler, Arc>>)>, + /// Whether we are the connection dialer or listener. + endpoint: ConnectedPoint, + /// Handler for backwards-compatibility. 
legacy: LegacyProtoHandler, + /// In the situation where either the legacy substream has been opened or the handshake-bearing + /// notifications protocol is open, but we haven't sent out any [`NotifsHandlerOut::Open`] + /// event yet, this contains the received handshake waiting to be reported through the + /// external API. + pending_handshake: Option>, + /// State of this handler. enabled: EnabledState, @@ -123,6 +132,9 @@ pub struct NotifsHandler { /// We use two different channels in order to have two different channel sizes, but from the /// receiving point of view, the two channels are the same. /// The receivers are fused in case the user drops the [`NotificationsSink`] entirely. + /// + /// Contains `Some` if and only if it has been reported to the user that the substreams are + /// open. notifications_sink_rx: Option< stream::Select< stream::Fuse>, @@ -159,7 +171,9 @@ impl IntoProtocolsHandler for NotifsHandlerProto { .into_iter() .map(|(proto, msg)| (proto.into_handler(remote_peer_id, connected_point), msg)) .collect(), + endpoint: connected_point.clone(), legacy: self.legacy.into_handler(remote_peer_id, connected_point), + pending_handshake: None, enabled: EnabledState::Initial, pending_in: Vec::new(), notifications_sink_rx: None, @@ -211,7 +225,7 @@ pub enum NotifsHandlerOut { /// Received a message on a custom protocol substream. Notification { /// Name of the protocol of the message. - protocol_name: Cow<'static, [u8]>, + protocol_name: Cow<'static, str>, /// Message that has been received. message: BytesMut, @@ -249,16 +263,10 @@ struct NotificationsSinkInner { /// dedicated to the peer. #[derive(Debug)] enum NotificationsSinkMessage { - /// Message emitted by [`NotificationsSink::send_legacy`]. - Legacy { - message: Vec, - }, - /// Message emitted by [`NotificationsSink::reserve_notification`] and /// [`NotificationsSink::write_notification_now`]. Notification { - protocol_name: Vec, - encoded_fallback_message: Vec, + protocol_name: Cow<'static, str>, message: Vec, }, @@ -267,26 +275,6 @@ enum NotificationsSinkMessage { } impl NotificationsSink { - /// Sends a message to the peer using the legacy substream. - /// - /// If too many messages are already buffered, the message is silently discarded and the - /// connection to the peer will be closed shortly after. - /// - /// This method will be removed in a future version. - pub fn send_legacy<'a>(&'a self, message: impl Into>) { - let mut lock = self.inner.sync_channel.lock(); - let result = lock.try_send(NotificationsSinkMessage::Legacy { - message: message.into() - }); - - if result.is_err() { - // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the - // buffer, and therefore that `try_send` will succeed. - let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose); - debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected())); - } - } - /// Sends a notification to the peer. /// /// If too many messages are already buffered, the notification is silently discarded and the @@ -298,14 +286,12 @@ impl NotificationsSink { /// This method will be removed in a future version. 
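With the fallback message gone, both ways of sending over a `NotificationsSink` now take only the protocol name and the payload. A sketch of the two call styles (the `sink` binding and the protocol name are placeholders):

    // Synchronous path: if too many notifications are already buffered, this one is
    // silently discarded, as described in the doc comment above.
    sink.send_sync_notification(Cow::Borrowed("/dot/block-announces/1"), b"announce".to_vec());

    // Back-pressured path: wait for a free slot in the buffer, then send.
    if let Ok(ready) = sink.reserve_notification(Cow::Borrowed("/dot/block-announces/1")).await {
        let _ = ready.send(b"announce".to_vec());
    }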
pub fn send_sync_notification<'a>( &'a self, - protocol_name: &[u8], - encoded_fallback_message: impl Into>, + protocol_name: Cow<'static, str>, message: impl Into> ) { let mut lock = self.inner.sync_channel.lock(); let result = lock.try_send(NotificationsSinkMessage::Notification { - protocol_name: protocol_name.to_owned(), - encoded_fallback_message: encoded_fallback_message.into(), + protocol_name, message: message.into() }); @@ -323,12 +309,12 @@ impl NotificationsSink { /// /// The protocol name is expected to be checked ahead of calling this method. It is a logic /// error to send a notification using an unknown protocol. - pub async fn reserve_notification<'a>(&'a self, protocol_name: &[u8]) -> Result, ()> { + pub async fn reserve_notification<'a>(&'a self, protocol_name: Cow<'static, str>) -> Result, ()> { let mut lock = self.inner.async_channel.lock().await; let poll_ready = future::poll_fn(|cx| lock.poll_ready(cx)).await; if poll_ready.is_ok() { - Ok(Ready { protocol_name: protocol_name.to_owned(), lock }) + Ok(Ready { protocol_name: protocol_name, lock }) } else { Err(()) } @@ -342,7 +328,7 @@ pub struct Ready<'a> { /// Guarded channel. The channel inside is guaranteed to not be full. lock: FuturesMutexGuard<'a, mpsc::Sender>, /// Name of the protocol. Should match one of the protocols passed at initialization. - protocol_name: Vec, + protocol_name: Cow<'static, str>, } impl<'a> Ready<'a> { @@ -351,12 +337,10 @@ impl<'a> Ready<'a> { /// Returns an error if the substream has been closed. pub fn send( mut self, - encoded_fallback_message: impl Into>, notification: impl Into> ) -> Result<(), ()> { self.lock.start_send(NotificationsSinkMessage::Notification { protocol_name: self.protocol_name, - encoded_fallback_message: encoded_fallback_message.into(), message: notification.into(), }).map_err(|_| ()) } @@ -377,11 +361,20 @@ impl NotifsHandlerProto { /// `list` is a list of notification protocols names, and the message to send as part of the /// handshake. At the moment, the message is always the same whether we open a substream /// ourselves or respond to handshake from the remote. + /// + /// The first protocol in `list` is special-cased as the protocol that contains the handshake + /// to report through the [`NotifsHandlerOut::Open`] event. + /// + /// # Panic + /// + /// - Panics if `list` is empty. 
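The `send_sync_notification` path above deliberately trades reliability for boundedness: when the synchronous queue is clogged, the notification is silently dropped and the connection is scheduled to close, relying on the fact that cloning the sender buys one extra slot for the close marker. A minimal sketch of that policy using `futures::channel::mpsc` directly; `SinkMessage` and `send_or_schedule_close` are illustrative names, not the crate's API:

```rust
use futures::channel::mpsc;
use futures::stream::StreamExt;

// Illustrative stand-in for the sink's internal message type.
enum SinkMessage {
    Notification(Vec<u8>),
    ForceClose,
}

// When the bounded queue is full, drop the notification and queue a `ForceClose`
// marker instead. Cloning a `futures::channel::mpsc::Sender` guarantees one extra
// slot, so the marker itself is not rejected merely because the queue is full.
fn send_or_schedule_close(tx: &mut mpsc::Sender<SinkMessage>, notification: Vec<u8>) {
    if tx.try_send(SinkMessage::Notification(notification)).is_err() {
        let _ = tx.clone().try_send(SinkMessage::ForceClose);
    }
}

fn main() {
    futures::executor::block_on(async {
        let (mut tx, rx) = mpsc::channel(1);
        for i in 0..4u8 {
            send_or_schedule_close(&mut tx, vec![i]);
        }
        drop(tx);
        let received: Vec<SinkMessage> = rx.collect().await;
        // Only part of the burst made it through, followed by a close request.
        assert!(matches!(received.last(), Some(SinkMessage::ForceClose)));
    });
}
```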
+ /// pub fn new( legacy: RegisteredProtocol, - list: impl Into, Arc>>)>>, + list: impl Into, Arc>>)>>, ) -> Self { let list = list.into(); + assert!(!list.is_empty()); let out_handlers = list .clone() @@ -411,25 +404,27 @@ impl ProtocolsHandler for NotifsHandler { type OutboundProtocol = EitherUpgrade; // Index within the `out_handlers`; None for legacy type OutboundOpenInfo = Option; + type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { let in_handlers = self.in_handlers.iter() .map(|(h, _)| h.listen_protocol().into_upgrade().1) .collect::>(); let proto = SelectUpgrade::new(in_handlers, self.legacy.listen_protocol().into_upgrade().1); - SubstreamProtocol::new(proto) + SubstreamProtocol::new(proto, ()) } fn inject_fully_negotiated_inbound( &mut self, - out: >::Output + out: >::Output, + (): () ) { match out { EitherOutput::First((out, num)) => - self.in_handlers[num].0.inject_fully_negotiated_inbound(out), + self.in_handlers[num].0.inject_fully_negotiated_inbound(out, ()), EitherOutput::Second(out) => - self.legacy.inject_fully_negotiated_inbound(out), + self.legacy.inject_fully_negotiated_inbound(out, ()), } } @@ -589,26 +584,38 @@ impl ProtocolsHandler for NotifsHandler { }; match message { - NotificationsSinkMessage::Legacy { message } => { - self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { - message - }); - } NotificationsSinkMessage::Notification { protocol_name, - encoded_fallback_message, message } => { + let mut found_any_with_name = false; + for (handler, _) in &mut self.out_handlers { - if handler.protocol_name() == &protocol_name[..] && handler.is_open() { - handler.send_or_discard(message); - continue 'poll_notifs_sink; + if *handler.protocol_name() == protocol_name { + found_any_with_name = true; + if handler.is_open() { + handler.send_or_discard(message); + continue 'poll_notifs_sink; + } } } - self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { - message: encoded_fallback_message, - }); + // This code can be reached via the following scenarios: + // + // - User tried to send a notification on a non-existing protocol. This + // most likely relates to https://github.com/paritytech/substrate/issues/6827 + // - User tried to send a notification to a peer we're not or no longer + // connected to. This happens in a normal scenario due to the racy nature + // of connections and disconnections, and is benign. + // + // We print a warning in the former condition. + if !found_any_with_name { + log::warn!( + target: "sub-libp2p", + "Tried to send a notification on non-registered protocol: {:?}", + protocol_name + ); + } } NotificationsSinkMessage::ForceClose => { return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged)); @@ -617,59 +624,68 @@ impl ProtocolsHandler for NotifsHandler { } } - if let Poll::Ready(ev) = self.legacy.poll(cx) { - return match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => - Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol.map_upgrade(EitherUpgrade::B), - info: None, - }), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { - endpoint, - received_handshake, - .. 
- }) => { - let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); - let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); - let notifications_sink = NotificationsSink { - inner: Arc::new(NotificationsSinkInner { - async_channel: FuturesMutex::new(async_tx), - sync_channel: Mutex::new(sync_tx), + // If `self.pending_handshake` is `Some`, we are in a state where the handshake-bearing + // substream (either the legacy substream or the one special-cased as providing the + // handshake) is open but the user isn't aware yet of the substreams being open. + // When that is the case, neither the legacy substream nor the incoming notifications + // substreams should be polled, otherwise there is a risk of receiving messages from them. + if self.pending_handshake.is_none() { + while let Poll::Ready(ev) = self.legacy.poll(cx) { + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol } => + return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: protocol + .map_upgrade(EitherUpgrade::B) + .map_info(|()| None) }), - }; - - debug_assert!(self.notifications_sink_rx.is_none()); - self.notifications_sink_rx = Some(stream::select(async_rx.fuse(), sync_rx.fuse())); - - Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Open { endpoint, received_handshake, notifications_sink } - )) - }, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { endpoint, reason }) => { - // We consciously drop the receivers despite notifications being potentially - // still buffered up. - debug_assert!(self.notifications_sink_rx.is_some()); - self.notifications_sink_rx = None; - - Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Closed { endpoint, reason } - )) - }, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => - Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::CustomMessage { message } - )), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { is_severe, error }) => - Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::ProtocolError { is_severe, error } - )), - ProtocolsHandlerEvent::Close(err) => - Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::Legacy(err))), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { + received_handshake, + .. + }) => { + if self.notifications_sink_rx.is_none() { + debug_assert!(self.pending_handshake.is_none()); + self.pending_handshake = Some(received_handshake); + } + cx.waker().wake_by_ref(); + return Poll::Pending; + }, + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { reason, .. }) => { + // We consciously drop the receivers despite notifications being potentially + // still buffered up. 
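The comment above pins down an ordering invariant: nothing may be reported through the external API, in particular no notification, before the `Open` event carrying the received handshake has been emitted. A tiny illustrative state machine for that invariant; `ApiState` is hypothetical and only mirrors the role `pending_handshake` plays in the handler:

```rust
/// Hypothetical per-connection state mirroring the role of `pending_handshake` above.
enum ApiState {
    /// No substream open yet; nothing can be reported.
    Closed,
    /// The handshake-bearing substream is open, but `Open` has not been reported yet.
    /// The real handler simply stops polling notification substreams in this state.
    HandshakePending(Vec<u8>),
    /// `Open` (with the handshake) has been reported; notifications may flow.
    Open,
}

impl ApiState {
    /// Produce the `Open` event, if and only if a handshake is waiting to be reported.
    fn take_open_event(&mut self) -> Option<Vec<u8>> {
        match std::mem::replace(self, ApiState::Closed) {
            ApiState::HandshakePending(handshake) => {
                *self = ApiState::Open;
                Some(handshake)
            }
            other => {
                *self = other;
                None
            }
        }
    }

    /// Notifications are only forwarded once `Open` has been reported.
    fn deliver(&self, notification: Vec<u8>) -> Option<Vec<u8>> {
        match self {
            ApiState::Open => Some(notification),
            _ => None,
        }
    }
}

fn main() {
    let mut state = ApiState::HandshakePending(b"handshake".to_vec());
    // A notification arriving before `Open` is reported must not reach the API yet.
    assert!(state.deliver(b"too early".to_vec()).is_none());
    assert_eq!(state.take_open_event().as_deref(), Some(&b"handshake"[..]));
    assert!(state.deliver(b"now fine".to_vec()).is_some());
}
```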
+ self.notifications_sink_rx = None; + + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::Closed { endpoint: self.endpoint.clone(), reason } + )) + }, + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => { + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CustomMessage { message } + )) + }, + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { is_severe, error }) => + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::ProtocolError { is_severe, error } + )), + ProtocolsHandlerEvent::Close(err) => + return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::Legacy(err))), + } } } for (handler_num, (handler, handshake_message)) in self.in_handlers.iter_mut().enumerate() { - while let Poll::Ready(ev) = handler.poll(cx) { + loop { + let poll = if self.notifications_sink_rx.is_some() { + handler.poll(cx) + } else { + handler.poll_process(cx) + }; + + let ev = match poll { + Poll::Ready(e) => e, + Poll::Pending => break, + }; + match ev { ProtocolsHandlerEvent::OutboundSubstreamRequest { .. } => error!("Incoming substream handler tried to open a substream"), @@ -688,12 +704,11 @@ impl ProtocolsHandler for NotifsHandler { }, ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed) => {}, ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(message)) => { - // Note that right now the legacy substream has precedence over - // everything. If it is not open, then we consider that nothing is open. - if self.legacy.is_open() { + debug_assert!(self.pending_handshake.is_none()); + if self.notifications_sink_rx.is_some() { let msg = NotifsHandlerOut::Notification { message, - protocol_name: handler.protocol_name().to_owned().into(), + protocol_name: handler.protocol_name().clone(), }; return Poll::Ready(ProtocolsHandlerEvent::Custom(msg)); } @@ -705,19 +720,25 @@ impl ProtocolsHandler for NotifsHandler { for (handler_num, (handler, _)) in self.out_handlers.iter_mut().enumerate() { while let Poll::Ready(ev) = handler.poll(cx) { match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => + ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol } => return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol.map_upgrade(EitherUpgrade::A), - info: Some(handler_num), + protocol: protocol + .map_upgrade(EitherUpgrade::A) + .map_info(|()| Some(handler_num)) }), ProtocolsHandlerEvent::Close(err) => void::unreachable(err), - // At the moment we don't actually care whether any notifications protocol - // opens or closes. - // Whether our communications with the remote are open or closed entirely - // depends on the legacy substream, because as long as we are open the user of - // this struct might try to send legacy protocol messages which we need to - // deliver for things to work properly. + // Opened substream on the handshake-bearing notification protocol. + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { handshake }) + if handler_num == 0 => + { + if self.notifications_sink_rx.is_none() && self.pending_handshake.is_none() { + self.pending_handshake = Some(handshake); + } + }, + + // Nothing to do in response to other notification substreams being opened + // or closed. ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { .. 
}) => {}, ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed) => {}, ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused) => {}, @@ -725,6 +746,30 @@ impl ProtocolsHandler for NotifsHandler { } } + if self.out_handlers.iter().all(|(h, _)| h.is_open() || h.is_refused()) { + if let Some(handshake) = self.pending_handshake.take() { + let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); + let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); + let notifications_sink = NotificationsSink { + inner: Arc::new(NotificationsSinkInner { + async_channel: FuturesMutex::new(async_tx), + sync_channel: Mutex::new(sync_tx), + }), + }; + + debug_assert!(self.notifications_sink_rx.is_none()); + self.notifications_sink_rx = Some(stream::select(async_rx.fuse(), sync_rx.fuse())); + + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::Open { + endpoint: self.endpoint.clone(), + received_handshake: handshake, + notifications_sink + } + )) + } + } + Poll::Pending } } diff --git a/client/network/src/protocol/generic_proto/handler/legacy.rs b/client/network/src/protocol/generic_proto/handler/legacy.rs index 71d6175f06674e97156d945ec46bf5eb9ecfc17f..d17b5e612daf20678a765a8d4e79bc89423f7b44 100644 --- a/client/network/src/protocol/generic_proto/handler/legacy.rs +++ b/client/network/src/protocol/generic_proto/handler/legacy.rs @@ -204,12 +204,6 @@ pub enum LegacyProtoHandlerIn { /// The node should stop using custom protocols. Disable, - - /// Sends a message through a custom protocol substream. - SendCustomMessage { - /// The message to send. - message: Vec, - }, } /// Event that can be emitted by a `LegacyProtoHandler`. @@ -222,16 +216,12 @@ pub enum LegacyProtoHandlerOut { /// Handshake message that has been sent to us. /// This is normally a "Status" message, but this out of the concern of this code. received_handshake: Vec, - /// The connected endpoint. - endpoint: ConnectedPoint, }, /// Closed a custom protocol with the remote. CustomProtocolClosed { /// Reason why the substream closed, for diagnostic purposes. reason: Cow<'static, str>, - /// The connected endpoint. - endpoint: ConnectedPoint, }, /// Receives a message on a custom protocol substream. @@ -250,18 +240,6 @@ pub enum LegacyProtoHandlerOut { } impl LegacyProtoHandler { - /// Returns true if the legacy substream is currently open. - pub fn is_open(&self) -> bool { - match &self.state { - ProtocolState::Init { substreams, .. } => !substreams.is_empty(), - ProtocolState::Opening { .. } => false, - ProtocolState::Normal { substreams, .. } => !substreams.is_empty(), - ProtocolState::Disabled { .. } => false, - ProtocolState::KillAsap => false, - ProtocolState::Poisoned => false, - } - } - /// Enables the handler. fn enable(&mut self) { self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { @@ -275,8 +253,7 @@ impl LegacyProtoHandler { if incoming.is_empty() { if let ConnectedPoint::Dialer { .. 
} = self.endpoint { self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(self.protocol.clone()), - info: (), + protocol: SubstreamProtocol::new(self.protocol.clone(), ()), }); } ProtocolState::Opening { @@ -285,7 +262,6 @@ impl LegacyProtoHandler { } else { let event = LegacyProtoHandlerOut::CustomProtocolOpen { version: incoming[0].0.protocol_version(), - endpoint: self.endpoint.clone(), received_handshake: mem::replace(&mut incoming[0].1, Vec::new()), }; self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); @@ -399,7 +375,6 @@ impl LegacyProtoHandler { if substreams.is_empty() { let event = LegacyProtoHandlerOut::CustomProtocolClosed { reason: "Legacy substream clogged".into(), - endpoint: self.endpoint.clone() }; self.state = ProtocolState::Disabled { shutdown: shutdown.into_iter().collect(), @@ -413,7 +388,6 @@ impl LegacyProtoHandler { if substreams.is_empty() { let event = LegacyProtoHandlerOut::CustomProtocolClosed { reason: "All substreams have been closed by the remote".into(), - endpoint: self.endpoint.clone() }; self.state = ProtocolState::Disabled { shutdown: shutdown.into_iter().collect(), @@ -426,7 +400,6 @@ impl LegacyProtoHandler { if substreams.is_empty() { let event = LegacyProtoHandlerOut::CustomProtocolClosed { reason: format!("Error on the last substream: {:?}", err).into(), - endpoint: self.endpoint.clone() }; self.state = ProtocolState::Disabled { shutdown: shutdown.into_iter().collect(), @@ -454,8 +427,7 @@ impl LegacyProtoHandler { deadline: Delay::new(Duration::from_secs(60)) }; Some(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(self.protocol.clone()), - info: (), + protocol: SubstreamProtocol::new(self.protocol.clone(), ()), }) } else { self.state = ProtocolState::Disabled { shutdown, reenable }; @@ -492,7 +464,6 @@ impl LegacyProtoHandler { ProtocolState::Opening { .. } => { let event = LegacyProtoHandlerOut::CustomProtocolOpen { version: substream.protocol_version(), - endpoint: self.endpoint.clone(), received_handshake, }; self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); @@ -516,17 +487,6 @@ impl LegacyProtoHandler { ProtocolState::KillAsap => ProtocolState::KillAsap, }; } - - /// Sends a message to the remote. - fn send_message(&mut self, message: Vec) { - match self.state { - ProtocolState::Normal { ref mut substreams, .. 
} => - substreams[0].send_message(message), - - _ => debug!(target: "sub-libp2p", "Tried to send message over closed protocol \ - with {:?}", self.remote_peer_id) - } - } } impl ProtocolsHandler for LegacyProtoHandler { @@ -536,14 +496,16 @@ impl ProtocolsHandler for LegacyProtoHandler { type InboundProtocol = RegisteredProtocol; type OutboundProtocol = RegisteredProtocol; type OutboundOpenInfo = (); + type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(self.protocol.clone()) + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(self.protocol.clone(), ()) } fn inject_fully_negotiated_inbound( &mut self, - (substream, handshake): >::Output + (substream, handshake): >::Output, + (): () ) { self.inject_fully_negotiated(substream, handshake); } @@ -560,12 +522,9 @@ impl ProtocolsHandler for LegacyProtoHandler { match message { LegacyProtoHandlerIn::Disable => self.disable(), LegacyProtoHandlerIn::Enable => self.enable(), - LegacyProtoHandlerIn::SendCustomMessage { message } => - self.send_message(message), } } - #[inline] fn inject_dial_upgrade_error(&mut self, _: (), err: ProtocolsHandlerUpgrErr) { let is_severe = match err { ProtocolsHandlerUpgrErr::Upgrade(_) => true, diff --git a/client/network/src/protocol/generic_proto/handler/notif_in.rs b/client/network/src/protocol/generic_proto/handler/notif_in.rs index ddd78566fcd2a7b773205d49f00c26e885da413d..d3b505e0de3e26fcb6c5cb234fa8e8a7ecd48b10 100644 --- a/client/network/src/protocol/generic_proto/handler/notif_in.rs +++ b/client/network/src/protocol/generic_proto/handler/notif_in.rs @@ -109,7 +109,7 @@ pub enum NotifsInHandlerOut { impl NotifsInHandlerProto { /// Builds a new `NotifsInHandlerProto`. pub fn new( - protocol_name: impl Into> + protocol_name: impl Into> ) -> Self { NotifsInHandlerProto { in_protocol: NotificationsIn::new(protocol_name), @@ -136,9 +136,36 @@ impl IntoProtocolsHandler for NotifsInHandlerProto { impl NotifsInHandler { /// Returns the name of the protocol that we accept. - pub fn protocol_name(&self) -> &[u8] { + pub fn protocol_name(&self) -> &Cow<'static, str> { self.in_protocol.protocol_name() } + + /// Equivalent to the `poll` method of `ProtocolsHandler`, except that it is guaranteed to + /// never generate [`NotifsInHandlerOut::Notif`]. + /// + /// Use this method in situations where it is not desirable to receive events but still + /// necessary to drive any potential incoming handshake or request. 
+ pub fn poll_process( + &mut self, + cx: &mut Context + ) -> Poll< + ProtocolsHandlerEvent + > { + if let Some(event) = self.events_queue.pop_front() { + return Poll::Ready(event) + } + + match self.substream.as_mut().map(|s| NotificationsInSubstream::poll_process(Pin::new(s), cx)) { + None | Some(Poll::Pending) => {}, + Some(Poll::Ready(Ok(v))) => match v {}, + Some(Poll::Ready(Err(_))) => { + self.substream = None; + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); + }, + } + + Poll::Pending + } } impl ProtocolsHandler for NotifsInHandler { @@ -148,14 +175,16 @@ impl ProtocolsHandler for NotifsInHandler { type InboundProtocol = NotificationsIn; type OutboundProtocol = DeniedUpgrade; type OutboundOpenInfo = (); + type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(self.in_protocol.clone()) + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(self.in_protocol.clone(), ()) } fn inject_fully_negotiated_inbound( &mut self, - (msg, proto): >::Output + (msg, proto): >::Output, + (): () ) { // If a substream already exists, we drop it and replace it with the new incoming one. if self.substream.is_some() { diff --git a/client/network/src/protocol/generic_proto/handler/notif_out.rs b/client/network/src/protocol/generic_proto/handler/notif_out.rs index 14de382c1bbca69be5b2c7b9554fe299d8732857..414e62c0d135fb80cde4b5f15bea63cc7e50d6aa 100644 --- a/client/network/src/protocol/generic_proto/handler/notif_out.rs +++ b/client/network/src/protocol/generic_proto/handler/notif_out.rs @@ -57,13 +57,13 @@ const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); /// See the documentation of [`NotifsOutHandler`] for more information. pub struct NotifsOutHandlerProto { /// Name of the protocol to negotiate. - protocol_name: Cow<'static, [u8]>, + protocol_name: Cow<'static, str>, } impl NotifsOutHandlerProto { /// Builds a new [`NotifsOutHandlerProto`]. Will use the given protocol name for the /// notifications substream. - pub fn new(protocol_name: impl Into>) -> Self { + pub fn new(protocol_name: impl Into>) -> Self { NotifsOutHandlerProto { protocol_name: protocol_name.into(), } @@ -97,7 +97,7 @@ impl IntoProtocolsHandler for NotifsOutHandlerProto { /// the remote for the purpose of sending notifications to it. pub struct NotifsOutHandler { /// Name of the protocol to negotiate. - protocol_name: Cow<'static, [u8]>, + protocol_name: Cow<'static, str>, /// Relationship with the node we're connected to. state: State, @@ -203,8 +203,24 @@ impl NotifsOutHandler { } } + /// Returns `true` if there has been an attempt to open the substream, but the remote refused + /// the substream. + /// + /// Always returns `false` if the handler is in a disabled state. + pub fn is_refused(&self) -> bool { + match &self.state { + State::Disabled => false, + State::DisabledOpening => false, + State::DisabledOpen(_) => false, + State::Opening { .. } => false, + State::Refused => true, + State::Open { .. } => false, + State::Poisoned => false, + } + } + /// Returns the name of the protocol that we negotiate. 
- pub fn protocol_name(&self) -> &[u8] { + pub fn protocol_name(&self) -> &Cow<'static, str> { &self.protocol_name } @@ -251,14 +267,16 @@ impl ProtocolsHandler for NotifsOutHandler { type InboundProtocol = DeniedUpgrade; type OutboundProtocol = NotificationsOut; type OutboundOpenInfo = (); + type InboundOpenInfo = (); - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(DeniedUpgrade) + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(DeniedUpgrade, ()) } fn inject_fully_negotiated_inbound( &mut self, - proto: >::Output + proto: >::Output, + (): () ) { // We should never reach here. `proto` is a `Void`. void::unreachable(proto) @@ -293,8 +311,7 @@ impl ProtocolsHandler for NotifsOutHandler { State::Disabled => { let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto).with_timeout(OPEN_TIMEOUT), - info: (), + protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), }); self.state = State::Opening { initial_message }; }, @@ -313,8 +330,7 @@ impl ProtocolsHandler for NotifsOutHandler { let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto).with_timeout(OPEN_TIMEOUT), - info: (), + protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), }); self.state = State::Opening { initial_message }; }, @@ -398,8 +414,7 @@ impl ProtocolsHandler for NotifsOutHandler { self.state = State::Opening { initial_message: initial_message.clone() }; let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message); self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto).with_timeout(OPEN_TIMEOUT), - info: (), + protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), }); return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); } diff --git a/client/network/src/protocol/generic_proto/tests.rs b/client/network/src/protocol/generic_proto/tests.rs index cf9f72b89ba526ab5b6be974ebf7d1d383c39070..d604645d4ac872fc60f41b497a14b4cf5a7a10b4 100644 --- a/client/network/src/protocol/generic_proto/tests.rs +++ b/client/network/src/protocol/generic_proto/tests.rs @@ -16,26 +16,30 @@ #![cfg(test)] -use futures::{prelude::*, ready}; -use codec::{Encode, Decode}; -use libp2p::core::connection::{ConnectionId, ListenerId}; -use libp2p::core::ConnectedPoint; -use libp2p::swarm::{Swarm, ProtocolsHandler, IntoProtocolsHandler}; -use libp2p::swarm::{PollParameters, NetworkBehaviour, NetworkBehaviourAction}; -use libp2p::{PeerId, Multiaddr, Transport}; -use rand::seq::SliceRandom; -use std::{error, io, task::Context, task::Poll, time::Duration}; -use std::collections::HashSet; -use crate::protocol::message::{generic::BlockResponse, Message}; use crate::protocol::generic_proto::{GenericProto, GenericProtoOut}; -use sp_test_primitives::Block; + +use futures::prelude::*; +use libp2p::{PeerId, Multiaddr, Transport}; +use libp2p::core::{ + connection::{ConnectionId, ListenerId}, + ConnectedPoint, + muxing, + transport::MemoryTransport, + upgrade +}; +use libp2p::{identity, noise, yamux}; +use libp2p::swarm::{ + Swarm, ProtocolsHandler, IntoProtocolsHandler, PollParameters, + NetworkBehaviour, NetworkBehaviourAction +}; +use std::{error, io, iter, 
task::{Context, Poll}, time::Duration}; /// Builds two nodes that have each other as bootstrap nodes. /// This is to be used only for testing, and a panic will happen if something goes wrong. fn build_nodes() -> (Swarm, Swarm) { let mut out = Vec::with_capacity(2); - let keypairs: Vec<_> = (0..2).map(|_| libp2p::identity::Keypair::generate_ed25519()).collect(); + let keypairs: Vec<_> = (0..2).map(|_| identity::Keypair::generate_ed25519()).collect(); let addrs: Vec = (0..2) .map(|_| format!("/memory/{}", rand::random::()).parse().unwrap()) .collect(); @@ -43,25 +47,16 @@ fn build_nodes() -> (Swarm, Swarm) { for index in 0 .. 2 { let keypair = keypairs[index].clone(); let local_peer_id = keypair.public().into_peer_id(); - let transport = libp2p::core::transport::MemoryTransport - .and_then(move |out, endpoint| { - let secio = libp2p::secio::SecioConfig::new(keypair); - libp2p::core::upgrade::apply( - out, - secio, - endpoint, - libp2p::core::upgrade::Version::V1 - ) - }) - .and_then(move |(peer_id, stream), endpoint| { - libp2p::core::upgrade::apply( - stream, - libp2p::yamux::Config::default(), - endpoint, - libp2p::core::upgrade::Version::V1 - ) - .map_ok(|muxer| (peer_id, libp2p::core::muxing::StreamMuxerBox::new(muxer))) - }) + + let noise_keys = noise::Keypair::::new() + .into_authentic(&keypair) + .unwrap(); + + let transport = MemoryTransport + .upgrade(upgrade::Version::V1) + .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .multiplex(yamux::Config::default()) + .map(|(peer, muxer), _| (peer, muxing::StreamMuxerBox::new(muxer))) .timeout(Duration::from_secs(20)) .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) .boxed(); @@ -83,7 +78,10 @@ fn build_nodes() -> (Swarm, Swarm) { }); let behaviour = CustomProtoWithAddr { - inner: GenericProto::new(local_peer_id, &b"test"[..], &[1], vec![], peerset), + inner: GenericProto::new( + local_peer_id, "test", &[1], vec![], peerset, + iter::once(("/foo".into(), Vec::new())) + ), addrs: addrs .iter() .enumerate() @@ -216,137 +214,6 @@ impl NetworkBehaviour for CustomProtoWithAddr { } } -#[test] -fn two_nodes_transfer_lots_of_packets() { - // We spawn two nodes, then make the first one send lots of packets to the second one. The test - // ends when the second one has received all of them. - - // This test consists in transferring this given number of packets. Considering that (by - // design) the connection gets closed if one of the remotes can't follow the pace, this number - // should not exceed the size of the buffer of pending notifications. - const NUM_PACKETS: u32 = 512; - - let (mut service1, mut service2) = build_nodes(); - - let fut1 = future::poll_fn(move |cx| -> Poll<()> { - loop { - match ready!(service1.poll_next_unpin(cx)) { - Some(GenericProtoOut::CustomProtocolOpen { peer_id, .. }) => { - for n in 0 .. NUM_PACKETS { - service1.send_packet( - &peer_id, - Message::::BlockResponse(BlockResponse { - id: n as _, - blocks: Vec::new(), - }).encode() - ); - } - }, - // An empty handshake is being sent after opening. - Some(GenericProtoOut::LegacyMessage { message, .. }) if message.is_empty() => {}, - _ => panic!(), - } - } - }); - - let mut packet_counter = 0u32; - let fut2 = future::poll_fn(move |cx| { - loop { - match ready!(service2.poll_next_unpin(cx)) { - Some(GenericProtoOut::CustomProtocolOpen { .. }) => {}, - // An empty handshake is being sent after opening. - Some(GenericProtoOut::LegacyMessage { message, .. }) if message.is_empty() => {}, - Some(GenericProtoOut::LegacyMessage { message, .. 
}) => { - match Message::::decode(&mut &message[..]).unwrap() { - Message::::BlockResponse(BlockResponse { id: _, blocks }) => { - assert!(blocks.is_empty()); - packet_counter += 1; - if packet_counter == NUM_PACKETS { - return Poll::Ready(()) - } - }, - _ => panic!(), - } - } - _ => panic!(), - } - } - }); - - futures::executor::block_on(async move { - future::select(fut1, fut2).await; - }); -} - -#[test] -fn basic_two_nodes_requests_in_parallel() { - let (mut service1, mut service2) = build_nodes(); - - // Generate random messages with or without a request id. - let mut to_send = { - let mut to_send = Vec::new(); - let mut existing_ids = HashSet::new(); - for _ in 0..200 { // Note: don't make that number too high or the CPU usage will explode. - let req_id = loop { - let req_id = rand::random::(); - - // ensure uniqueness - odds of randomly sampling collisions - // is unlikely, but possible to cause spurious test failures. - if existing_ids.insert(req_id) { - break req_id; - } - }; - - to_send.push(Message::::BlockResponse( - BlockResponse { id: req_id, blocks: Vec::new() } - )); - } - to_send - }; - - // Clone `to_send` in `to_receive`. Below we will remove from `to_receive` the messages we - // receive, until the list is empty. - let mut to_receive = to_send.clone(); - to_send.shuffle(&mut rand::thread_rng()); - - let fut1 = future::poll_fn(move |cx| -> Poll<()> { - loop { - match ready!(service1.poll_next_unpin(cx)) { - Some(GenericProtoOut::CustomProtocolOpen { peer_id, .. }) => { - for msg in to_send.drain(..) { - service1.send_packet(&peer_id, msg.encode()); - } - }, - // An empty handshake is being sent after opening. - Some(GenericProtoOut::LegacyMessage { message, .. }) if message.is_empty() => {}, - _ => panic!(), - } - } - }); - - let fut2 = future::poll_fn(move |cx| { - loop { - match ready!(service2.poll_next_unpin(cx)) { - Some(GenericProtoOut::CustomProtocolOpen { .. }) => {}, - // An empty handshake is being sent after opening. - Some(GenericProtoOut::LegacyMessage { message, .. }) if message.is_empty() => {}, - Some(GenericProtoOut::LegacyMessage { message, .. }) => { - let pos = to_receive.iter().position(|m| m.encode() == message).unwrap(); - to_receive.remove(pos); - if to_receive.is_empty() { - return Poll::Ready(()) - } - } - _ => panic!(), - } - } - }); - - futures::executor::block_on(async move { - future::select(fut1, fut2).await; - }); -} - #[test] fn reconnect_after_disconnect() { // We connect two nodes together, then force a disconnect (through the API of the `Service`), diff --git a/client/network/src/protocol/generic_proto/upgrade/legacy.rs b/client/network/src/protocol/generic_proto/upgrade/legacy.rs index f56ab2450d43e467845c039f55ef3a52ed5a172a..1b2b97253d1aed125a2a39f5d3827e8d4f1e47f8 100644 --- a/client/network/src/protocol/generic_proto/upgrade/legacy.rs +++ b/client/network/src/protocol/generic_proto/upgrade/legacy.rs @@ -49,7 +49,7 @@ impl RegisteredProtocol { -> Self { let protocol = protocol.into(); let mut base_name = b"/substrate/".to_vec(); - base_name.extend_from_slice(protocol.as_bytes()); + base_name.extend_from_slice(protocol.as_ref().as_bytes()); base_name.extend_from_slice(b"/"); RegisteredProtocol { @@ -123,15 +123,6 @@ impl RegisteredProtocolSubstream { self.is_closing = true; self.send_queue.clear(); } - - /// Sends a message to the substream. 
- pub fn send_message(&mut self, data: Vec) { - if self.is_closing { - return - } - - self.send_queue.push_back(From::from(&data[..])); - } } /// Event produced by the `RegisteredProtocolSubstream`. diff --git a/client/network/src/protocol/generic_proto/upgrade/notifications.rs b/client/network/src/protocol/generic_proto/upgrade/notifications.rs index 80fd7761f8088482f309d8ec772586287df8a35e..64b4b980da002d52dadcbb33f90e516445a5a7f1 100644 --- a/client/network/src/protocol/generic_proto/upgrade/notifications.rs +++ b/client/network/src/protocol/generic_proto/upgrade/notifications.rs @@ -39,7 +39,7 @@ use futures::prelude::*; use futures_codec::Framed; use libp2p::core::{UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade}; use log::error; -use std::{borrow::Cow, io, iter, mem, pin::Pin, task::{Context, Poll}}; +use std::{borrow::Cow, convert::Infallible, io, iter, mem, pin::Pin, task::{Context, Poll}}; use unsigned_varint::codec::UviBytes; /// Maximum allowed size of the two handshake messages, in bytes. @@ -50,7 +50,7 @@ const MAX_HANDSHAKE_SIZE: usize = 1024; #[derive(Debug, Clone)] pub struct NotificationsIn { /// Protocol name to use when negotiating the substream. - protocol_name: Cow<'static, [u8]>, + protocol_name: Cow<'static, str>, } /// Upgrade that opens a substream, waits for the remote to accept by sending back a status @@ -58,7 +58,7 @@ pub struct NotificationsIn { #[derive(Debug, Clone)] pub struct NotificationsOut { /// Protocol name to use when negotiating the substream. - protocol_name: Cow<'static, [u8]>, + protocol_name: Cow<'static, str>, /// Message to send when we start the handshake. initial_message: Vec, } @@ -100,14 +100,14 @@ pub struct NotificationsOutSubstream { impl NotificationsIn { /// Builds a new potential upgrade. - pub fn new(protocol_name: impl Into>) -> Self { + pub fn new(protocol_name: impl Into>) -> Self { NotificationsIn { protocol_name: protocol_name.into(), } } /// Returns the name of the protocol that we accept. - pub fn protocol_name(&self) -> &[u8] { + pub fn protocol_name(&self) -> &Cow<'static, str> { &self.protocol_name } } @@ -117,7 +117,11 @@ impl UpgradeInfo for NotificationsIn { type InfoIter = iter::Once; fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol_name.clone()) + let bytes: Cow<'static, [u8]> = match &self.protocol_name { + Cow::Borrowed(s) => Cow::Borrowed(s.as_bytes()), + Cow::Owned(s) => Cow::Owned(s.as_bytes().to_vec()) + }; + iter::once(bytes) } } @@ -144,7 +148,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, let mut initial_message = vec![0u8; initial_message_len]; if !initial_message.is_empty() { - socket.read(&mut initial_message).await?; + socket.read_exact(&mut initial_message).await?; } let substream = NotificationsInSubstream { @@ -158,7 +162,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, } impl NotificationsInSubstream -where TSubstream: AsyncRead + AsyncWrite, +where TSubstream: AsyncRead + AsyncWrite + Unpin, { /// Sends the handshake in order to inform the remote that we accept the substream. pub fn send_handshake(&mut self, message: impl Into>) { @@ -169,6 +173,48 @@ where TSubstream: AsyncRead + AsyncWrite, self.handshake = NotificationsInSubstreamHandshake::PendingSend(message.into()); } + + /// Equivalent to `Stream::poll_next`, except that it only drives the handshake and is + /// guaranteed to not generate any notification. 
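The `poll_process` method introduced below advances the handshake state machine (`PendingSend` → `Flush` → `Sent`) without ever surfacing a notification. A self-contained sketch of that pattern driving any `futures::Sink`; `Handshake` and `poll_handshake` are illustrative names under those assumptions, not the upgrade's real types:

```rust
use futures::{channel::mpsc, future, Sink, StreamExt};
use std::pin::Pin;
use std::task::{Context, Poll};

/// Illustrative handshake states, mirroring `PendingSend` -> `Flush` -> `Sent`.
enum Handshake {
    PendingSend(Vec<u8>),
    Flush,
    Sent,
}

/// Drive the handshake towards `Sent` without producing any other output,
/// in the spirit of the `poll_process` methods added in this change.
fn poll_handshake<S>(
    state: &mut Handshake,
    mut sink: Pin<&mut S>,
    cx: &mut Context<'_>,
) -> Poll<Result<(), S::Error>>
where
    S: Sink<Vec<u8>>,
{
    loop {
        match std::mem::replace(state, Handshake::Sent) {
            Handshake::PendingSend(msg) => match sink.as_mut().poll_ready(cx) {
                Poll::Ready(Ok(())) => {
                    sink.as_mut().start_send(msg)?;
                    *state = Handshake::Flush;
                }
                Poll::Ready(Err(err)) => return Poll::Ready(Err(err)),
                Poll::Pending => {
                    *state = Handshake::PendingSend(msg);
                    return Poll::Pending;
                }
            },
            Handshake::Flush => match sink.as_mut().poll_flush(cx)? {
                Poll::Ready(()) => *state = Handshake::Sent,
                Poll::Pending => {
                    *state = Handshake::Flush;
                    return Poll::Pending;
                }
            },
            Handshake::Sent => return Poll::Ready(Ok(())),
        }
    }
}

fn main() {
    futures::executor::block_on(async {
        let (tx, mut rx) = mpsc::channel::<Vec<u8>>(4);
        let mut state = Handshake::PendingSend(b"hello".to_vec());
        futures::pin_mut!(tx);
        future::poll_fn(|cx| poll_handshake(&mut state, tx.as_mut(), cx)).await.unwrap();
        assert_eq!(rx.next().await, Some(b"hello".to_vec()));
    });
}
```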
+ pub fn poll_process(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + + loop { + match mem::replace(this.handshake, NotificationsInSubstreamHandshake::Sent) { + NotificationsInSubstreamHandshake::PendingSend(msg) => + match Sink::poll_ready(this.socket.as_mut(), cx) { + Poll::Ready(_) => { + *this.handshake = NotificationsInSubstreamHandshake::Flush; + match Sink::start_send(this.socket.as_mut(), io::Cursor::new(msg)) { + Ok(()) => {}, + Err(err) => return Poll::Ready(Err(err)), + } + }, + Poll::Pending => { + *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg); + return Poll::Pending + } + }, + NotificationsInSubstreamHandshake::Flush => + match Sink::poll_flush(this.socket.as_mut(), cx)? { + Poll::Ready(()) => + *this.handshake = NotificationsInSubstreamHandshake::Sent, + Poll::Pending => { + *this.handshake = NotificationsInSubstreamHandshake::Flush; + return Poll::Pending + } + }, + + st @ NotificationsInSubstreamHandshake::NotSent | + st @ NotificationsInSubstreamHandshake::Sent | + st @ NotificationsInSubstreamHandshake::ClosingInResponseToRemote | + st @ NotificationsInSubstreamHandshake::BothSidesClosed => { + *this.handshake = st; + return Poll::Pending; + } + } + } + } } impl Stream for NotificationsInSubstream @@ -244,7 +290,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, impl NotificationsOut { /// Builds a new potential upgrade. - pub fn new(protocol_name: impl Into>, initial_message: impl Into>) -> Self { + pub fn new(protocol_name: impl Into>, initial_message: impl Into>) -> Self { let initial_message = initial_message.into(); if initial_message.len() > MAX_HANDSHAKE_SIZE { error!(target: "sub-libp2p", "Outbound networking handshake is above allowed protocol limit"); @@ -262,7 +308,11 @@ impl UpgradeInfo for NotificationsOut { type InfoIter = iter::Once; fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol_name.clone()) + let bytes: Cow<'static, [u8]> = match &self.protocol_name { + Cow::Borrowed(s) => Cow::Borrowed(s.as_bytes()), + Cow::Owned(s) => Cow::Owned(s.as_bytes().to_vec()) + }; + iter::once(bytes) } } @@ -292,7 +342,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, let mut handshake = vec![0u8; handshake_len]; if !handshake.is_empty() { - socket.read(&mut handshake).await?; + socket.read_exact(&mut handshake).await?; } Ok((handshake, NotificationsOutSubstream { @@ -378,10 +428,11 @@ mod tests { use async_std::net::{TcpListener, TcpStream}; use futures::{prelude::*, channel::oneshot}; use libp2p::core::upgrade; + use std::borrow::Cow; #[test] fn basic_works() { - const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = async_std::task::spawn(async move { @@ -420,7 +471,7 @@ mod tests { fn empty_handshake() { // Check that everything still works when the handshake messages are empty. 
- const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = async_std::task::spawn(async move { @@ -457,7 +508,7 @@ mod tests { #[test] fn refused() { - const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = async_std::task::spawn(async move { @@ -495,7 +546,7 @@ mod tests { #[test] fn large_initial_message_refused() { - const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = async_std::task::spawn(async move { @@ -526,7 +577,7 @@ mod tests { #[test] fn large_handshake_refused() { - const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = async_std::task::spawn(async move { diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index a7fbb92387cf6022e894accbdf4855520a912f15..1cd78c0ed1ddac6c764dc94843b67dcd8749dae9 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -41,12 +41,6 @@ pub type Message = generic::Message< ::Extrinsic, >; -/// Type alias for using the status type using block type parameters. -pub type Status = generic::Status< - ::Hash, - <::Header as HeaderT>::Number, ->; - /// Type alias for using the block request type using block type parameters. pub type BlockRequest = generic::BlockRequest< ::Hash, diff --git a/client/network/src/protocol/sync/extra_requests.rs b/client/network/src/protocol/sync/extra_requests.rs index d025b86b2536f5912ffa83a43dbbbfef1e56bed5..df336c25339fd8eb42b635735e7fab94fcd2feb6 100644 --- a/client/network/src/protocol/sync/extra_requests.rs +++ b/client/network/src/protocol/sync/extra_requests.rs @@ -463,7 +463,7 @@ mod tests { #[test] fn request_is_rescheduled_when_earlier_block_is_finalized() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut finality_proofs = ExtraRequests::::new("test"); diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs new file mode 100644 index 0000000000000000000000000000000000000000..5141e6db70141fc5e7be3465781ea4a8c08c5c9e --- /dev/null +++ b/client/network/src/request_responses.rs @@ -0,0 +1,872 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Collection of request-response protocols. +//! +//! The [`RequestResponse`] struct defined in this module provides support for zero or more +//! so-called "request-response" protocols. +//! +//! 
A request-response protocol works in the following way: +//! +//! - For every emitted request, a new substream is open and the protocol is negotiated. If the +//! remote supports the protocol, the size of the request is sent as a LEB128 number, followed +//! with the request itself. The remote then sends the size of the response as a LEB128 number, +//! followed with the response. +//! +//! - Requests have a certain time limit before they time out. This time includes the time it +//! takes to send/receive the request and response. +//! +//! - If provided, a ["requests processing"](ProtocolConfig::inbound_queue) channel +//! is used to handle incoming requests. +//! + +use futures::{channel::{mpsc, oneshot}, prelude::*}; +use libp2p::{ + core::{ + connection::{ConnectionId, ListenerId}, + ConnectedPoint, Multiaddr, PeerId, + }, + request_response::{ + RequestResponse, RequestResponseCodec, RequestResponseConfig, RequestResponseEvent, + RequestResponseMessage, ResponseChannel, ProtocolSupport + }, + swarm::{ + protocols_handler::multi::MultiHandler, NetworkBehaviour, NetworkBehaviourAction, + PollParameters, ProtocolsHandler, + }, +}; +use std::{ + borrow::Cow, collections::{hash_map::Entry, HashMap}, convert::TryFrom as _, io, iter, + pin::Pin, task::{Context, Poll}, time::Duration, +}; + +pub use libp2p::request_response::{InboundFailure, OutboundFailure, RequestId}; + +/// Configuration for a single request-response protocol. +#[derive(Debug, Clone)] +pub struct ProtocolConfig { + /// Name of the protocol on the wire. Should be something like `/foo/bar`. + pub name: Cow<'static, str>, + + /// Maximum allowed size, in bytes, of a request. + /// + /// Any request larger than this value will be declined as a way to avoid allocating too + /// much memory for it. + pub max_request_size: u64, + + /// Maximum allowed size, in bytes, of a response. + /// + /// Any response larger than this value will be declined as a way to avoid allocating too + /// much memory for it. + pub max_response_size: u64, + + /// Duration after which emitted requests are considered timed out. + /// + /// If you expect the response to come back quickly, you should set this to a smaller duration. + pub request_timeout: Duration, + + /// Channel on which the networking service will send incoming requests. + /// + /// Every time a peer sends a request to the local node using this protocol, the networking + /// service will push an element on this channel. The receiving side of this channel then has + /// to pull this element, process the request, and send back the response to send back to the + /// peer. + /// + /// The size of the channel has to be carefully chosen. If the channel is full, the networking + /// service will discard the incoming request send back an error to the peer. Consequently, + /// the channel being full is an indicator that the node is overloaded. + /// + /// You can typically set the size of the channel to `T / d`, where `T` is the + /// `request_timeout` and `d` is the expected average duration of CPU and I/O it takes to + /// build a response. + /// + /// Can be `None` if the local node does not support answering incoming requests. + /// If this is `None`, then the local node will not advertise support for this protocol towards + /// other peers. If this is `Some` but the channel is closed, then the local node will + /// advertise support for this protocol, but any incoming request will lead to an error being + /// sent back. 
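The wire format described at the top of this module is simply a varint/LEB128 length prefix followed by the payload, in both directions. A minimal sketch of that framing using the `unsigned_varint` crate (the helper names here are illustrative):

```rust
use unsigned_varint::{decode, encode};

/// Frame a single message as described above: an LEB128/varint length prefix
/// followed by the payload bytes.
fn encode_frame(payload: &[u8]) -> Vec<u8> {
    let mut len_buf = encode::usize_buffer();
    let mut frame = encode::usize(payload.len(), &mut len_buf).to_vec();
    frame.extend_from_slice(payload);
    frame
}

/// Decode the prefix and return the payload. A real decoder would additionally
/// check that `rest` actually contains `len` bytes before slicing.
fn decode_frame(frame: &[u8]) -> Result<&[u8], decode::Error> {
    let (len, rest) = decode::usize(frame)?;
    Ok(&rest[..len])
}

fn main() {
    let frame = encode_frame(b"ping");
    // A 4-byte payload yields a single-byte length prefix with value 4.
    assert_eq!(frame[0], 4);
    assert_eq!(decode_frame(&frame).unwrap(), &b"ping"[..]);
}
```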
+ pub inbound_queue: Option>, +} + +/// A single request received by a peer on a request-response protocol. +#[derive(Debug)] +pub struct IncomingRequest { + /// Who sent the request. + pub peer: PeerId, + + /// Request sent by the remote. Will always be smaller than + /// [`ProtocolConfig::max_request_size`]. + pub payload: Vec, + + /// Channel to send back the response to. + pub pending_response: oneshot::Sender>, +} + +/// Event generated by the [`RequestResponsesBehaviour`]. +#[derive(Debug)] +pub enum Event { + /// A remote sent a request and either we have successfully answered it or an error happened. + /// + /// This event is generated for statistics purposes. + InboundRequest { + /// Peer which has emitted the request. + peer: PeerId, + /// Name of the protocol in question. + protocol: Cow<'static, str>, + /// If `Ok`, contains the time elapsed between when we received the request and when we + /// sent back the response. If `Err`, the error that happened. + result: Result, + }, + + /// A request initiated using [`RequestResponsesBehaviour::send_request`] has succeeded or + /// failed. + RequestFinished { + /// Request that has succeeded. + request_id: RequestId, + /// Response sent by the remote or reason for failure. + result: Result, RequestFailure>, + }, +} + +/// Implementation of `NetworkBehaviour` that provides support for request-response protocols. +pub struct RequestResponsesBehaviour { + /// The multiple sub-protocols, by name. + /// Contains the underlying libp2p `RequestResponse` behaviour, plus an optional + /// "response builder" used to build responses for incoming requests. + protocols: HashMap< + Cow<'static, str>, + (RequestResponse, Option>) + >, + + /// Whenever an incoming request arrives, a `Future` is added to this list and will yield the + /// response to send back to the remote. + pending_responses: stream::FuturesUnordered< + Pin + Send>> + >, +} + +/// Generated by the response builder and waiting to be processed. +enum RequestProcessingOutcome { + Response { + protocol: Cow<'static, str>, + inner_channel: ResponseChannel, ()>>, + response: Vec, + }, + Busy { + peer: PeerId, + protocol: Cow<'static, str>, + }, +} + +impl RequestResponsesBehaviour { + /// Creates a new behaviour. Must be passed a list of supported protocols. Returns an error if + /// the same protocol is passed twice. + pub fn new(list: impl Iterator) -> Result { + let mut protocols = HashMap::new(); + for protocol in list { + let mut cfg = RequestResponseConfig::default(); + cfg.set_connection_keep_alive(Duration::from_secs(10)); + cfg.set_request_timeout(protocol.request_timeout); + + let protocol_support = if protocol.inbound_queue.is_some() { + ProtocolSupport::Full + } else { + ProtocolSupport::Outbound + }; + + let rq_rp = RequestResponse::new(GenericCodec { + max_request_size: protocol.max_request_size, + max_response_size: protocol.max_response_size, + }, iter::once((protocol.name.as_bytes().to_vec(), protocol_support)), cfg); + + match protocols.entry(protocol.name) { + Entry::Vacant(e) => e.insert((rq_rp, protocol.inbound_queue)), + Entry::Occupied(e) => + return Err(RegisterError::DuplicateProtocol(e.key().clone())), + }; + } + + Ok(Self { + protocols, + pending_responses: stream::FuturesUnordered::new(), + }) + } + + /// Initiates sending a request. + /// + /// An error is returned if we are not connected to the target peer or if the protocol doesn't + /// match one that has been registered. 
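Each element pushed onto `inbound_queue` is an `IncomingRequest` as defined above: the request payload plus a one-shot channel for the response, where dropping the channel is what gets reported back to the peer as a busy/refused outcome. A sketch of the worker loop a user of this API might run; the trimmed-down local `IncomingRequest` here is a hypothetical stand-in for the real struct:

```rust
use futures::{channel::{mpsc, oneshot}, StreamExt};

/// Hypothetical, trimmed-down mirror of the `IncomingRequest` shape above.
struct IncomingRequest {
    payload: Vec<u8>,
    pending_response: oneshot::Sender<Vec<u8>>,
}

/// Worker servicing the inbound queue: pull requests, build responses, and answer
/// through the one-shot channel. Dropping `pending_response` instead would surface
/// to the remote as a failure ("busy" in the terms used above).
async fn serve(mut rx: mpsc::Receiver<IncomingRequest>) {
    while let Some(rq) = rx.next().await {
        let mut response = b"echo: ".to_vec();
        response.extend_from_slice(&rq.payload);
        let _ = rq.pending_response.send(response);
    }
}

fn main() {
    futures::executor::block_on(async {
        let (mut tx, rx) = mpsc::channel(8);
        let (resp_tx, resp_rx) = oneshot::channel();

        tx.try_send(IncomingRequest {
            payload: b"ping".to_vec(),
            pending_response: resp_tx,
        }).unwrap();
        drop(tx); // close the queue so `serve` terminates

        let (_, response) = futures::join!(serve(rx), resp_rx);
        assert_eq!(response.unwrap(), b"echo: ping".to_vec());
    });
}
```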
+ pub fn send_request(&mut self, target: &PeerId, protocol: &str, request: Vec) + -> Result + { + if let Some((protocol, _)) = self.protocols.get_mut(protocol) { + if protocol.is_connected(target) { + Ok(protocol.send_request(target, request)) + } else { + Err(SendRequestError::NotConnected) + } + } else { + Err(SendRequestError::UnknownProtocol) + } + } +} + +impl NetworkBehaviour for RequestResponsesBehaviour { + type ProtocolsHandler = MultiHandler< + String, + as NetworkBehaviour>::ProtocolsHandler, + >; + type OutEvent = Event; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + let iter = self.protocols.iter_mut() + .map(|(p, (r, _))| (p.to_string(), NetworkBehaviour::new_handler(r))); + + MultiHandler::try_from_iter(iter) + .expect("Protocols are in a HashMap and there can be at most one handler per \ + protocol name, which is the only possible error; qed") + } + + fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { + Vec::new() + } + + fn inject_connection_established( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_connection_established(p, peer_id, conn, endpoint) + } + } + + fn inject_connected(&mut self, peer_id: &PeerId) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_connected(p, peer_id) + } + } + + fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_connection_closed(p, peer_id, conn, endpoint) + } + } + + fn inject_disconnected(&mut self, peer_id: &PeerId) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_disconnected(p, peer_id) + } + } + + fn inject_addr_reach_failure( + &mut self, + peer_id: Option<&PeerId>, + addr: &Multiaddr, + error: &dyn std::error::Error + ) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_addr_reach_failure(p, peer_id, addr, error) + } + } + + fn inject_event( + &mut self, + peer_id: PeerId, + connection: ConnectionId, + (p_name, event): ::OutEvent, + ) { + if let Some((proto, _)) = self.protocols.get_mut(&*p_name) { + return proto.inject_event(peer_id, connection, event) + } + + log::warn!(target: "sub-libp2p", + "inject_node_event: no request-response instance registered for protocol {:?}", + p_name) + } + + fn inject_new_external_addr(&mut self, addr: &Multiaddr) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_new_external_addr(p, addr) + } + } + + fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_expired_listen_addr(p, addr) + } + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_dial_failure(p, peer_id) + } + } + + fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_new_listen_addr(p, addr) + } + } + + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_listener_error(p, id, err) + } + } + + fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_listener_closed(p, id, reason) + } + } + + fn poll( + &mut self, + cx: &mut Context, + 
params: &mut impl PollParameters, + ) -> Poll< + NetworkBehaviourAction< + ::InEvent, + Self::OutEvent, + >, + > { + 'poll_all: loop { + // Poll to see if any response is ready to be sent back. + while let Poll::Ready(Some(result)) = self.pending_responses.poll_next_unpin(cx) { + match result { + RequestProcessingOutcome::Response { + protocol, inner_channel, response + } => { + if let Some((protocol, _)) = self.protocols.get_mut(&*protocol) { + protocol.send_response(inner_channel, Ok(response)); + } + } + RequestProcessingOutcome::Busy { peer, protocol } => { + let out = Event::InboundRequest { + peer, + protocol, + result: Err(ResponseFailure::Busy), + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); + } + } + } + + // Poll request-responses protocols. + for (protocol, (behaviour, resp_builder)) in &mut self.protocols { + while let Poll::Ready(ev) = behaviour.poll(cx, params) { + let ev = match ev { + // Main events we are interested in. + NetworkBehaviourAction::GenerateEvent(ev) => ev, + + // Other events generated by the underlying behaviour are transparently + // passed through. + NetworkBehaviourAction::DialAddress { address } => { + log::error!("The request-response isn't supposed to start dialing peers"); + return Poll::Ready(NetworkBehaviourAction::DialAddress { address }) + } + NetworkBehaviourAction::DialPeer { peer_id, condition } => { + log::error!("The request-response isn't supposed to start dialing peers"); + return Poll::Ready(NetworkBehaviourAction::DialPeer { + peer_id, + condition, + }) + } + NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event, + } => { + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event: ((*protocol).to_string(), event), + }) + } + NetworkBehaviourAction::ReportObservedAddr { address } => { + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { + address, + }) + } + }; + + match ev { + // Received a request from a remote. + RequestResponseEvent::Message { + peer, + message: RequestResponseMessage::Request { request, channel, .. }, + } => { + let (tx, rx) = oneshot::channel(); + + // Submit the request to the "response builder" passed by the user at + // initialization. + if let Some(resp_builder) = resp_builder { + // If the response builder is too busy, silently drop `tx`. + // This will be reported as a `Busy` error. + let _ = resp_builder.try_send(IncomingRequest { + peer: peer.clone(), + payload: request, + pending_response: tx, + }); + } + + let protocol = protocol.clone(); + self.pending_responses.push(Box::pin(async move { + // The `tx` created above can be dropped if we are not capable of + // processing this request, which is reflected as a "Busy" error. + if let Ok(response) = rx.await { + RequestProcessingOutcome::Response { + protocol, inner_channel: channel, response + } + } else { + RequestProcessingOutcome::Busy { peer, protocol } + } + })); + + // This `continue` makes sure that `pending_responses` gets polled + // after we have added the new element. + continue 'poll_all; + } + + // Received a response from a remote to one of our requests. + RequestResponseEvent::Message { + message: + RequestResponseMessage::Response { + request_id, + response, + }, + .. + } => { + let out = Event::RequestFinished { + request_id, + result: response.map_err(|()| RequestFailure::Refused), + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); + } + + // One of our requests has failed. 
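The `continue 'poll_all` above restarts the outer loop so that a response future just pushed onto `pending_responses` is polled within the same wake-up. An executor-driven miniature of that push-then-re-poll pattern around `FuturesUnordered`, purely for illustration rather than a copy of the behaviour's real poll loop:

```rust
use futures::{future, stream::{FuturesUnordered, StreamExt}};

fn main() {
    futures::executor::block_on(async {
        // Work items can be pushed while the collection is being drained, and the
        // loop immediately comes back around to poll whatever was just added.
        let mut pending: FuturesUnordered<future::Ready<u32>> = FuturesUnordered::new();
        pending.push(future::ready(1));

        let mut results = Vec::new();
        while let Some(n) = pending.next().await {
            results.push(n);
            if n < 3 {
                // Queue follow-up work, then loop again so it is polled promptly.
                pending.push(future::ready(n + 1));
            }
        }
        assert_eq!(results, vec![1, 2, 3]);
    });
}
```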
+ RequestResponseEvent::OutboundFailure { + request_id, + error, + .. + } => { + let out = Event::RequestFinished { + request_id, + result: Err(RequestFailure::Network(error)), + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); + } + + // Remote has tried to send a request but failed. + RequestResponseEvent::InboundFailure { peer, error, .. } => { + let out = Event::InboundRequest { + peer, + protocol: protocol.clone(), + result: Err(ResponseFailure::Network(error)), + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); + } + }; + } + } + + break Poll::Pending; + } + } +} + +/// Error when registering a protocol. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum RegisterError { + /// A protocol has been specified multiple times. + DuplicateProtocol(#[error(ignore)] Cow<'static, str>), +} + +/// Error when sending a request. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum SendRequestError { + /// We are not currently connected to the requested peer. + NotConnected, + /// Given protocol hasn't been registered. + UnknownProtocol, +} + +/// Error in a request. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum RequestFailure { + /// Remote has closed the substream before answering, thereby signaling that it considers the + /// request as valid, but refused to answer it. + Refused, + /// Problem on the network. + #[display(fmt = "Problem on the network")] + Network(#[error(ignore)] OutboundFailure), +} + +/// Error when processing a request sent by a remote. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum ResponseFailure { + /// Internal response builder is too busy to process this request. + Busy, + /// Problem on the network. + #[display(fmt = "Problem on the network")] + Network(#[error(ignore)] InboundFailure), +} + +/// Implements the libp2p [`RequestResponseCodec`] trait. Defines how streams of bytes are turned +/// into requests and responses and vice-versa. +#[derive(Debug, Clone)] +#[doc(hidden)] // Needs to be public in order to satisfy the Rust compiler. +pub struct GenericCodec { + max_request_size: u64, + max_response_size: u64, +} + +#[async_trait::async_trait] +impl RequestResponseCodec for GenericCodec { + type Protocol = Vec; + type Request = Vec; + type Response = Result, ()>; + + async fn read_request( + &mut self, + _: &Self::Protocol, + mut io: &mut T, + ) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + // Read the length. + let length = unsigned_varint::aio::read_usize(&mut io).await + .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?; + if length > usize::try_from(self.max_request_size).unwrap_or(usize::max_value()) { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + format!("Request size exceeds limit: {} > {}", length, self.max_request_size) + )); + } + + // Read the payload. + let mut buffer = vec![0; length]; + io.read_exact(&mut buffer).await?; + Ok(buffer) + } + + async fn read_response( + &mut self, + _: &Self::Protocol, + mut io: &mut T, + ) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + // Note that this function returns a `Result>`. Returning an `Err` is + // considered as a protocol error and will result in the entire connection being closed. + // Returning `Ok(Err(_))` signifies that a response has successfully been fetched, and + // that this response is an error. + + // Read the length. 
+ let length = match unsigned_varint::aio::read_usize(&mut io).await { + Ok(l) => l, + Err(unsigned_varint::io::ReadError::Io(err)) + if matches!(err.kind(), io::ErrorKind::UnexpectedEof) => + { + return Ok(Err(())); + } + Err(err) => return Err(io::Error::new(io::ErrorKind::InvalidInput, err)), + }; + + if length > usize::try_from(self.max_response_size).unwrap_or(usize::max_value()) { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + format!("Response size exceeds limit: {} > {}", length, self.max_response_size) + )); + } + + // Read the payload. + let mut buffer = vec![0; length]; + io.read_exact(&mut buffer).await?; + Ok(Ok(buffer)) + } + + async fn write_request( + &mut self, + _: &Self::Protocol, + io: &mut T, + req: Self::Request, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + // TODO: check the length? + // Write the length. + { + let mut buffer = unsigned_varint::encode::usize_buffer(); + io.write_all(unsigned_varint::encode::usize(req.len(), &mut buffer)).await?; + } + + // Write the payload. + io.write_all(&req).await?; + + io.close().await?; + Ok(()) + } + + async fn write_response( + &mut self, + _: &Self::Protocol, + io: &mut T, + res: Self::Response, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + // If `res` is an `Err`, we jump to closing the substream without writing anything on it. + if let Ok(res) = res { + // TODO: check the length? + // Write the length. + { + let mut buffer = unsigned_varint::encode::usize_buffer(); + io.write_all(unsigned_varint::encode::usize(res.len(), &mut buffer)).await?; + } + + // Write the payload. + io.write_all(&res).await?; + } + + io.close().await?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use futures::{channel::mpsc, prelude::*}; + use libp2p::identity::Keypair; + use libp2p::Multiaddr; + use libp2p::core::upgrade; + use libp2p::core::transport::{Transport, MemoryTransport}; + use libp2p::noise; + use libp2p::swarm::{Swarm, SwarmEvent}; + use std::{iter, time::Duration}; + + #[test] + fn basic_request_response_works() { + let protocol_name = "/test/req-rep/1"; + + // Build swarms whose behaviour is `RequestResponsesBehaviour`. + let mut swarms = (0..2) + .map(|_| { + let keypair = Keypair::generate_ed25519(); + + let noise_keys = noise::Keypair::::new() + .into_authentic(&keypair) + .unwrap(); + + let transport = MemoryTransport + .upgrade(upgrade::Version::V1) + .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .multiplex(libp2p::yamux::Config::default()); + + let behaviour = { + let (tx, mut rx) = mpsc::channel(64); + + let b = super::RequestResponsesBehaviour::new(iter::once(super::ProtocolConfig { + name: From::from(protocol_name), + max_request_size: 1024, + max_response_size: 1024 * 1024, + request_timeout: Duration::from_secs(30), + inbound_queue: Some(tx), + })).unwrap(); + + async_std::task::spawn(async move { + while let Some(rq) = rx.next().await { + assert_eq!(rq.payload, b"this is a request"); + let _ = rq.pending_response.send(b"this is a response".to_vec()); + } + }); + + b + }; + + let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); + let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); + + Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); + (swarm, listen_addr) + }) + .collect::>(); + + // Ask `swarm[0]` to dial `swarm[1]`. There isn't any discovery mechanism in place in + // this test, so they wouldn't connect to each other. 
+ { + let dial_addr = swarms[1].1.clone(); + Swarm::dial_addr(&mut swarms[0].0, dial_addr).unwrap(); + } + + // Running `swarm[0]` in the background until a `InboundRequest` event happens, + // which is a hint about the test having ended. + async_std::task::spawn({ + let (mut swarm, _) = swarms.remove(0); + async move { + loop { + match swarm.next_event().await { + SwarmEvent::Behaviour(super::Event::InboundRequest { result, .. }) => { + assert!(result.is_ok()); + break + }, + _ => {} + } + } + } + }); + + // Remove and run the remaining swarm. + let (mut swarm, _) = swarms.remove(0); + async_std::task::block_on(async move { + let mut sent_request_id = None; + + loop { + match swarm.next_event().await { + SwarmEvent::ConnectionEstablished { peer_id, .. } => { + let id = swarm.send_request( + &peer_id, + protocol_name, + b"this is a request".to_vec() + ).unwrap(); + assert!(sent_request_id.is_none()); + sent_request_id = Some(id); + } + SwarmEvent::Behaviour(super::Event::RequestFinished { + request_id, + result, + }) => { + assert_eq!(Some(request_id), sent_request_id); + let result = result.unwrap(); + assert_eq!(result, b"this is a response"); + break; + } + _ => {} + } + } + }); + } + + #[test] + fn max_response_size_exceeded() { + let protocol_name = "/test/req-rep/1"; + + // Build swarms whose behaviour is `RequestResponsesBehaviour`. + let mut swarms = (0..2) + .map(|_| { + let keypair = Keypair::generate_ed25519(); + + let noise_keys = noise::Keypair::::new() + .into_authentic(&keypair) + .unwrap(); + + let transport = MemoryTransport + .upgrade(upgrade::Version::V1) + .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .multiplex(libp2p::yamux::Config::default()); + + let behaviour = { + let (tx, mut rx) = mpsc::channel(64); + + let b = super::RequestResponsesBehaviour::new(iter::once(super::ProtocolConfig { + name: From::from(protocol_name), + max_request_size: 1024, + max_response_size: 8, // <-- important for the test + request_timeout: Duration::from_secs(30), + inbound_queue: Some(tx), + })).unwrap(); + + async_std::task::spawn(async move { + while let Some(rq) = rx.next().await { + assert_eq!(rq.payload, b"this is a request"); + let _ = rq.pending_response.send(b"this response exceeds the limit".to_vec()); + } + }); + + b + }; + + let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); + let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); + + Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); + (swarm, listen_addr) + }) + .collect::>(); + + // Ask `swarm[0]` to dial `swarm[1]`. There isn't any discovery mechanism in place in + // this test, so they wouldn't connect to each other. + { + let dial_addr = swarms[1].1.clone(); + Swarm::dial_addr(&mut swarms[0].0, dial_addr).unwrap(); + } + + // Running `swarm[0]` in the background until a `InboundRequest` event happens, + // which is a hint about the test having ended. + async_std::task::spawn({ + let (mut swarm, _) = swarms.remove(0); + async move { + loop { + match swarm.next_event().await { + SwarmEvent::Behaviour(super::Event::InboundRequest { result, .. }) => { + assert!(result.is_ok()); + break + }, + _ => {} + } + } + } + }); + + // Remove and run the remaining swarm. + let (mut swarm, _) = swarms.remove(0); + async_std::task::block_on(async move { + let mut sent_request_id = None; + + loop { + match swarm.next_event().await { + SwarmEvent::ConnectionEstablished { peer_id, .. 
} => { + let id = swarm.send_request( + &peer_id, + protocol_name, + b"this is a request".to_vec() + ).unwrap(); + assert!(sent_request_id.is_none()); + sent_request_id = Some(id); + } + SwarmEvent::Behaviour(super::Event::RequestFinished { + request_id, + result, + }) => { + assert_eq!(Some(request_id), sent_request_id); + match result { + Err(super::RequestFailure::Network(super::OutboundFailure::ConnectionClosed)) => {}, + _ => panic!() + } + break; + } + _ => {} + } + } + }); + } +} diff --git a/client/network/src/service.rs b/client/network/src/service.rs index cc3821a455e9f8170dfeef5c02183839b6a143fc..59f55f01a45d169b62d8157351e4f04d9ac39bb8 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -20,7 +20,7 @@ //! //! There are two main structs in this module: [`NetworkWorker`] and [`NetworkService`]. //! The [`NetworkWorker`] *is* the network and implements the `Future` trait. It must be polled in -//! order fo the network to advance. +//! order for the network to advance. //! The [`NetworkService`] is merely a shared version of the [`NetworkWorker`]. You can obtain an //! `Arc` by calling [`NetworkWorker::service`]. //! @@ -28,8 +28,8 @@ //! which is then processed by [`NetworkWorker::poll`]. use crate::{ - ExHashT, NetworkStateInfo, - behaviour::{Behaviour, BehaviourOut}, + ExHashT, NetworkStateInfo, NetworkStatus, + behaviour::{self, Behaviour, BehaviourOut}, config::{parse_str_addr, NonReservedPeerMode, Params, Role, TransportConfig}, DhtEvent, discovery::DiscoveryConfig, @@ -42,18 +42,15 @@ use crate::{ protocol::{self, event::Event, NotifsHandlerError, LegacyConnectionKillError, NotificationsSink, Ready, sync::SyncState, PeerInfo, Protocol}, transport, ReputationChange, }; -use futures::prelude::*; +use futures::{channel::oneshot, prelude::*}; use libp2p::{PeerId, multiaddr, Multiaddr}; use libp2p::core::{ConnectedPoint, Executor, connection::{ConnectionError, PendingConnectionError}, either::EitherError}; use libp2p::kad::record; use libp2p::ping::handler::PingFailure; use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent, protocols_handler::NodeHandlerWrapperError}; use log::{error, info, trace, warn}; +use metrics::{Metrics, MetricSources, Histogram, HistogramVec}; use parking_lot::Mutex; -use prometheus_endpoint::{ - register, Counter, CounterVec, Gauge, GaugeVec, Histogram, HistogramOpts, HistogramVec, Opts, - PrometheusError, Registry, U64, -}; use sc_peerset::PeersetHandle; use sp_consensus::import_queue::{BlockImportError, BlockImportResult, ImportQueue, Link}; use sp_runtime::{ @@ -75,7 +72,11 @@ use std::{ }, task::Poll, }; +use wasm_timer::Instant; + +pub use behaviour::{ResponseFailure, InboundFailure, RequestFailure, OutboundFailure}; +mod metrics; mod out_events; #[cfg(test)] mod tests; @@ -101,7 +102,7 @@ pub struct NetworkService { /// that peer. Updated by the [`NetworkWorker`]. peers_notifications_sinks: Arc>>, /// For each legacy gossiping engine ID, the corresponding new protocol name. - protocol_name_by_engine: Mutex>>, + protocol_name_by_engine: Mutex>>, /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. notifications_sizes_metric: Option, @@ -240,12 +241,6 @@ impl NetworkWorker { local_peer_id_legacy ); - // Initialize the metrics. 
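Aside on the wire format defined by `GenericCodec` above: both requests and responses are framed as an unsigned-varint length prefix followed by the raw payload bytes, and a response substream closed before any data is read is interpreted as a refusal. Below is a minimal sketch of that framing using the same `unsigned_varint` crate; the helpers `encode_frame` and `decode_frame` are illustrative names only and are not part of this change.

// Sketch of the length-prefixed framing used by `GenericCodec` (illustrative, not part of the diff).
fn encode_frame(payload: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(payload.len() + 10);
    let mut len_buf = unsigned_varint::encode::usize_buffer();
    // Varint-encoded length prefix, as written by `write_request`/`write_response`.
    out.extend_from_slice(unsigned_varint::encode::usize(payload.len(), &mut len_buf));
    out.extend_from_slice(payload);
    out
}

fn decode_frame(frame: &[u8], max_size: usize) -> Result<Vec<u8>, &'static str> {
    // Varint length prefix first, mirroring `read_request`/`read_response`.
    let (len, rest) = unsigned_varint::decode::usize(frame).map_err(|_| "invalid varint prefix")?;
    if len > max_size {
        return Err("size limit exceeded");
    }
    if rest.len() < len {
        return Err("truncated payload");
    }
    Ok(rest[..len].to_vec())
}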
- let metrics = match ¶ms.metrics_registry { - Some(registry) => Some(Metrics::register(®istry)?), - None => None - }; - let checker = params.on_demand.as_ref() .map(|od| od.checker().clone()) .unwrap_or_else(|| Arc::new(AlwaysBadChecker)); @@ -260,7 +255,6 @@ impl NetworkWorker { local_peer_id.clone(), params.chain.clone(), params.transaction_pool, - params.finality_proof_provider.clone(), params.finality_proof_request_builder, params.protocol_id.clone(), peerset_config, @@ -315,16 +309,28 @@ impl NetworkWorker { config }; - let mut behaviour = Behaviour::new( - protocol, - params.role, - user_agent, - local_public, - block_requests, - finality_proof_requests, - light_client_handler, - discovery_config - ); + let mut behaviour = { + let result = Behaviour::new( + protocol, + params.role, + user_agent, + local_public, + block_requests, + finality_proof_requests, + light_client_handler, + discovery_config, + params.network_config.request_response_protocols, + ); + + match result { + Ok(b) => b, + Err(crate::request_responses::RegisterError::DuplicateProtocol(proto)) => { + return Err(Error::DuplicateRequestResponseProtocol { + protocol: proto, + }) + }, + } + }; for (engine_id, protocol_name) in ¶ms.network_config.notifications_protocols { behaviour.register_notifications_protocol(*engine_id, protocol_name.clone()); @@ -353,6 +359,18 @@ impl NetworkWorker { (builder.build(), bandwidth) }; + // Initialize the metrics. + let metrics = match ¶ms.metrics_registry { + Some(registry) => { + Some(metrics::register(registry, MetricSources { + bandwidth: bandwidth.clone(), + major_syncing: is_major_syncing.clone(), + connected_peers: num_connected.clone(), + })?) + } + None => None + }; + // Listen on multiaddresses. for addr in ¶ms.network_config.listen_addresses { if let Err(err) = Swarm::::listen_on(&mut swarm, addr.clone()) { @@ -399,9 +417,23 @@ impl NetworkWorker { peers_notifications_sinks, metrics, boot_node_ids, + pending_requests: HashMap::with_capacity(128), }) } + /// High-level network status information. + pub fn status(&self) -> NetworkStatus { + NetworkStatus { + sync_state: self.sync_state(), + best_seen_block: self.best_seen_block(), + num_sync_peers: self.num_sync_peers(), + num_connected_peers: self.num_connected_peers(), + num_active_peers: self.num_active_peers(), + total_bytes_inbound: self.total_bytes_inbound(), + total_bytes_outbound: self.total_bytes_outbound(), + } + } + /// Returns the total number of bytes received so far. pub fn total_bytes_inbound(&self) -> u64 { self.service.bandwidth.total_inbound() @@ -541,8 +573,6 @@ impl NetworkWorker { peer_id: Swarm::::local_peer_id(&swarm).to_base58(), listened_addresses: Swarm::::listeners(&swarm).cloned().collect(), external_addresses: Swarm::::external_addresses(&swarm).cloned().collect(), - total_bytes_inbound: self.service.bandwidth.total_inbound(), - total_bytes_outbound: self.service.bandwidth.total_outbound(), connected_peers, not_connected_peers, peerset: swarm.user_protocol_mut().peerset_debug_info(), @@ -575,6 +605,22 @@ impl NetworkService { &self.local_peer_id } + /// Set authorized peers. + /// + /// Need a better solution to manage authorized peers, but now just use reserved peers for + /// prototyping. + pub fn set_authorized_peers(&self, peers: HashSet) { + self.peerset.set_reserved_peers(peers) + } + + /// Set authorized_only flag. + /// + /// Need a better solution to decide authorized_only, but now just use reserved_only flag for + /// prototyping. 
+ pub fn set_authorized_only(&self, reserved_only: bool) { + self.peerset.set_reserved_only(reserved_only) + } + /// Appends a notification to the buffer of pending outgoing notifications with the given peer. /// Has no effect if the notifications channel with this protocol name is not open. /// @@ -593,7 +639,7 @@ impl NetworkService { /// > preventing the message from being delivered. /// /// The protocol must have been registered with `register_notifications_protocol` or - /// `NetworkConfiguration::notifications_protocols`. + /// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols). /// pub fn write_notification(&self, target: PeerId, engine_id: ConsensusEngineId, message: Vec) { // We clone the `NotificationsSink` in order to be able to unlock the network-wide @@ -614,18 +660,7 @@ impl NetworkService { // Determine the wire protocol name corresponding to this `engine_id`. let protocol_name = self.protocol_name_by_engine.lock().get(&engine_id).cloned(); if let Some(protocol_name) = protocol_name { - // For backwards-compatibility reason, we have to duplicate the message and pass it - // in the situation where the remote still uses the legacy substream. - let fallback = codec::Encode::encode(&{ - protocol::message::generic::Message::<(), (), (), ()>::Consensus({ - protocol::message::generic::ConsensusMessage { - engine_id, - data: message.clone(), - } - }) - }); - - sink.send_sync_notification(&protocol_name, fallback, message); + sink.send_sync_notification(protocol_name, message); } else { return; } @@ -647,10 +682,9 @@ impl NetworkService { /// 2. [`NotificationSenderReady::send`] enqueues the notification for sending. This operation /// can only fail if the underlying notification substream or connection has suddenly closed. /// - /// An error is returned either by `notification_sender`, by [`NotificationSender::wait`], - /// or by [`NotificationSenderReady::send`] if there exists no open notifications substream - /// with that combination of peer and protocol, or if the remote has asked to close the - /// notifications substream. If that happens, it is guaranteed that an + /// An error is returned by [`NotificationSenderReady::send`] if there exists no open + /// notifications substream with that combination of peer and protocol, or if the remote + /// has asked to close the notifications substream. If that happens, it is guaranteed that an /// [`Event::NotificationStreamClosed`] has been generated on the stream returned by /// [`NetworkService::event_stream`]. /// @@ -661,7 +695,7 @@ impl NetworkService { /// in which case enqueued notifications will be lost. /// /// The protocol must have been registered with `register_notifications_protocol` or - /// `NetworkConfiguration::notifications_protocols`. + /// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols). /// /// # Usage /// @@ -701,6 +735,10 @@ impl NetworkService { /// Notifications should be dropped /// if buffer is full /// + /// + /// See also the [`gossip`](crate::gossip) module for a higher-level way to send + /// notifications. 
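For illustration, the two-step flow documented above looks roughly as follows from the caller's side. This is a hedged sketch only: `network`, `peer` and `ENGINE_ID` are assumed to already exist in the surrounding code, and error handling is reduced to `?`.

// Illustrative sketch; assumes `network: &NetworkService<B, H>`, `peer: PeerId`
// and `ENGINE_ID: ConsensusEngineId` are in scope.
// Step 1: obtain a `NotificationSender` for this peer and protocol.
let sender = network.notification_sender(peer, ENGINE_ID)?;
// Step 2: wait until the notifications substream has capacity for one message.
let ready = sender.ready().await?;
// Step 3: enqueue the notification; this only fails if the substream closed in the meantime.
ready.send(b"engine-specific payload".to_vec())?;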
+ /// pub fn notification_sender( &self, target: PeerId, @@ -726,7 +764,6 @@ impl NetworkService { Ok(NotificationSender { sink, protocol_name, - engine_id, notification_size_metric: self.notifications_sizes_metric.as_ref().map(|histogram| { histogram.with_label_values(&["out", &maybe_utf8_bytes_to_string(&engine_id)]) }), @@ -743,12 +780,51 @@ impl NetworkService { /// parameter is a `&'static str`, and not a `String`, in order to avoid accidentally having /// an unbounded set of Prometheus metrics, which would be quite bad in terms of memory pub fn event_stream(&self, name: &'static str) -> impl Stream { - // Note: when transitioning to stable futures, remove the `Error` entirely let (tx, rx) = out_events::channel(name); let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::EventStream(tx)); rx } + /// Sends a single targeted request to a specific peer. On success, returns the response of + /// the peer. + /// + /// Request-response protocols are a way to complement notifications protocols, but + /// notifications should remain the default ways of communicating information. For example, a + /// peer can announce something through a notification, after which the recipient can obtain + /// more information by performing a request. + /// As such, this function is meant to be called only with peers we are already connected to. + /// Calling this method with a `target` we are not connected to will *not* attempt to connect + /// to said peer. + /// + /// No limit or throttling of concurrent outbound requests per peer and protocol are enforced. + /// Such restrictions, if desired, need to be enforced at the call site(s). + /// + /// The protocol must have been registered through + /// [`NetworkConfiguration::request_response_protocols`]( + /// crate::config::NetworkConfiguration::request_response_protocols). + pub async fn request( + &self, + target: PeerId, + protocol: impl Into>, + request: Vec + ) -> Result, RequestFailure> { + let (tx, rx) = oneshot::channel(); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request { + target, + protocol: protocol.into(), + request, + pending_response: tx + }); + + match rx.await { + Ok(v) => v, + // The channel can only be closed if the network worker no longer exists. If the + // network worker no longer exists, then all connections to `target` are necessarily + // closed, and we legitimately report this situation as a "ConnectionClosed". + Err(_) => Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)), + } + } + /// Registers a new notifications protocol. /// /// After a protocol has been registered, you can call `write_notifications`. @@ -765,7 +841,7 @@ impl NetworkService { pub fn register_notifications_protocol( &self, engine_id: ConsensusEngineId, - protocol_name: impl Into>, + protocol_name: impl Into>, ) { let protocol_name = protocol_name.into(); self.protocol_name_by_engine.lock().insert(engine_id, protocol_name.clone()); @@ -999,10 +1075,7 @@ pub struct NotificationSender { sink: NotificationsSink, /// Name of the protocol on the wire. - protocol_name: Cow<'static, [u8]>, - - /// Engine ID used for the fallback message. - engine_id: ConsensusEngineId, + protocol_name: Cow<'static, str>, /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. @@ -1013,11 +1086,10 @@ impl NotificationSender { /// Returns a future that resolves when the `NotificationSender` is ready to send a notification. 
pub async fn ready<'a>(&'a self) -> Result, NotificationSenderError> { Ok(NotificationSenderReady { - ready: match self.sink.reserve_notification(&self.protocol_name).await { + ready: match self.sink.reserve_notification(self.protocol_name.clone()).await { Ok(r) => r, Err(()) => return Err(NotificationSenderError::Closed), }, - engine_id: self.engine_id, notification_size_metric: self.notification_size_metric.clone(), }) } @@ -1028,9 +1100,6 @@ impl NotificationSender { pub struct NotificationSenderReady<'a> { ready: Ready<'a>, - /// Engine ID used for the fallback message. - engine_id: ConsensusEngineId, - /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. notification_size_metric: Option, @@ -1045,18 +1114,8 @@ impl<'a> NotificationSenderReady<'a> { notification_size_metric.observe(notification.len() as f64); } - // For backwards-compatibility reason, we have to duplicate the message and pass it - // in the situation where the remote still uses the legacy substream. - let fallback = codec::Encode::encode(&{ - protocol::message::generic::Message::<(), (), (), ()>::Consensus({ - protocol::message::generic::ConsensusMessage { - engine_id: self.engine_id, - data: notification.clone(), - } - }) - }); - - self.ready.send(fallback, notification) + self.ready + .send(notification) .map_err(|()| NotificationSenderError::Closed) } } @@ -1087,9 +1146,15 @@ enum ServiceToWorkerMsg { AddKnownAddress(PeerId, Multiaddr), SyncFork(Vec, B::Hash, NumberFor), EventStream(out_events::Sender), + Request { + target: PeerId, + protocol: Cow<'static, str>, + request: Vec, + pending_response: oneshot::Sender, RequestFailure>>, + }, RegisterNotifProtocol { engine_id: ConsensusEngineId, - protocol_name: Cow<'static, [u8]>, + protocol_name: Cow<'static, str>, }, DisconnectPeer(PeerId), UpdateChain, @@ -1123,235 +1188,18 @@ pub struct NetworkWorker { metrics: Option, /// The `PeerId`'s of all boot nodes. boot_node_ids: Arc>, + /// Requests started using [`NetworkService::request`]. Includes the channel to send back the + /// response, when the request has started, and the name of the protocol for diagnostic + /// purposes. + pending_requests: HashMap< + behaviour::RequestId, + (oneshot::Sender, RequestFailure>>, Instant, String) + >, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Shared with the [`NetworkService`]. peers_notifications_sinks: Arc>>, } -struct Metrics { - // This list is ordered alphabetically - connections_closed_total: CounterVec, - connections_opened_total: CounterVec, - distinct_peers_connections_closed_total: Counter, - distinct_peers_connections_opened_total: Counter, - import_queue_blocks_submitted: Counter, - import_queue_finality_proofs_submitted: Counter, - import_queue_justifications_submitted: Counter, - incoming_connections_errors_total: CounterVec, - incoming_connections_total: Counter, - is_major_syncing: Gauge, - issued_light_requests: Counter, - kademlia_query_duration: HistogramVec, - kademlia_random_queries_total: CounterVec, - kademlia_records_count: GaugeVec, - kademlia_records_sizes_total: GaugeVec, - kbuckets_num_nodes: GaugeVec, - listeners_local_addresses: Gauge, - listeners_errors_total: Counter, - // Note: `network_bytes_total` is a monotonic gauge obtained by - // sampling an existing counter. 
- network_bytes_total: GaugeVec, - notifications_sizes: HistogramVec, - notifications_streams_closed_total: CounterVec, - notifications_streams_opened_total: CounterVec, - peers_count: Gauge, - peerset_num_discovered: Gauge, - peerset_num_requested: Gauge, - pending_connections: Gauge, - pending_connections_errors_total: CounterVec, - requests_in_total: HistogramVec, - requests_out_finished: HistogramVec, - requests_out_started_total: CounterVec, -} - -impl Metrics { - fn register(registry: &Registry) -> Result { - Ok(Self { - // This list is ordered alphabetically - connections_closed_total: register(CounterVec::new( - Opts::new( - "sub_libp2p_connections_closed_total", - "Total number of connections closed, by direction and reason" - ), - &["direction", "reason"] - )?, registry)?, - connections_opened_total: register(CounterVec::new( - Opts::new( - "sub_libp2p_connections_opened_total", - "Total number of connections opened by direction" - ), - &["direction"] - )?, registry)?, - distinct_peers_connections_closed_total: register(Counter::new( - "sub_libp2p_distinct_peers_connections_closed_total", - "Total number of connections closed with distinct peers" - )?, registry)?, - distinct_peers_connections_opened_total: register(Counter::new( - "sub_libp2p_distinct_peers_connections_opened_total", - "Total number of connections opened with distinct peers" - )?, registry)?, - import_queue_blocks_submitted: register(Counter::new( - "import_queue_blocks_submitted", - "Number of blocks submitted to the import queue.", - )?, registry)?, - import_queue_finality_proofs_submitted: register(Counter::new( - "import_queue_finality_proofs_submitted", - "Number of finality proofs submitted to the import queue.", - )?, registry)?, - import_queue_justifications_submitted: register(Counter::new( - "import_queue_justifications_submitted", - "Number of justifications submitted to the import queue.", - )?, registry)?, - incoming_connections_errors_total: register(CounterVec::new( - Opts::new( - "sub_libp2p_incoming_connections_handshake_errors_total", - "Total number of incoming connections that have failed during the \ - initial handshake" - ), - &["reason"] - )?, registry)?, - incoming_connections_total: register(Counter::new( - "sub_libp2p_incoming_connections_total", - "Total number of incoming connections on the listening sockets" - )?, registry)?, - is_major_syncing: register(Gauge::new( - "sub_libp2p_is_major_syncing", "Whether the node is performing a major sync or not.", - )?, registry)?, - issued_light_requests: register(Counter::new( - "issued_light_requests", - "Number of light client requests that our node has issued.", - )?, registry)?, - kademlia_query_duration: register(HistogramVec::new( - HistogramOpts { - common_opts: Opts::new( - "sub_libp2p_kademlia_query_duration", - "Duration of Kademlia queries per query type" - ), - buckets: prometheus_endpoint::exponential_buckets(0.5, 2.0, 10) - .expect("parameters are always valid values; qed"), - }, - &["type"] - )?, registry)?, - kademlia_random_queries_total: register(CounterVec::new( - Opts::new( - "sub_libp2p_kademlia_random_queries_total", - "Number of random Kademlia queries started" - ), - &["protocol"] - )?, registry)?, - kademlia_records_count: register(GaugeVec::new( - Opts::new( - "sub_libp2p_kademlia_records_count", - "Number of records in the Kademlia records store" - ), - &["protocol"] - )?, registry)?, - kademlia_records_sizes_total: register(GaugeVec::new( - Opts::new( - "sub_libp2p_kademlia_records_sizes_total", - "Total size of all 
the records in the Kademlia records store" - ), - &["protocol"] - )?, registry)?, - kbuckets_num_nodes: register(GaugeVec::new( - Opts::new( - "sub_libp2p_kbuckets_num_nodes", - "Number of nodes in the Kademlia k-buckets" - ), - &["protocol"] - )?, registry)?, - listeners_local_addresses: register(Gauge::new( - "sub_libp2p_listeners_local_addresses", "Number of local addresses we're listening on" - )?, registry)?, - listeners_errors_total: register(Counter::new( - "sub_libp2p_listeners_errors_total", - "Total number of non-fatal errors reported by a listener" - )?, registry)?, - network_bytes_total: register(GaugeVec::new( - Opts::new( - "sub_libp2p_network_bytes_total", - "Total bandwidth usage" - ), - &["direction"] - )?, registry)?, - notifications_sizes: register(HistogramVec::new( - HistogramOpts { - common_opts: Opts::new( - "sub_libp2p_notifications_sizes", - "Sizes of the notifications send to and received from all nodes" - ), - buckets: prometheus_endpoint::exponential_buckets(64.0, 4.0, 8) - .expect("parameters are always valid values; qed"), - }, - &["direction", "protocol"] - )?, registry)?, - notifications_streams_closed_total: register(CounterVec::new( - Opts::new( - "sub_libp2p_notifications_streams_closed_total", - "Total number of notification substreams that have been closed" - ), - &["protocol"] - )?, registry)?, - notifications_streams_opened_total: register(CounterVec::new( - Opts::new( - "sub_libp2p_notifications_streams_opened_total", - "Total number of notification substreams that have been opened" - ), - &["protocol"] - )?, registry)?, - peers_count: register(Gauge::new( - "sub_libp2p_peers_count", "Number of network gossip peers", - )?, registry)?, - peerset_num_discovered: register(Gauge::new( - "sub_libp2p_peerset_num_discovered", "Number of nodes stored in the peerset manager", - )?, registry)?, - peerset_num_requested: register(Gauge::new( - "sub_libp2p_peerset_num_requested", "Number of nodes that the peerset manager wants us to be connected to", - )?, registry)?, - pending_connections: register(Gauge::new( - "sub_libp2p_pending_connections", - "Number of connections in the process of being established", - )?, registry)?, - pending_connections_errors_total: register(CounterVec::new( - Opts::new( - "sub_libp2p_pending_connections_errors_total", - "Total number of pending connection errors" - ), - &["reason"] - )?, registry)?, - requests_in_total: register(HistogramVec::new( - HistogramOpts { - common_opts: Opts::new( - "sub_libp2p_requests_in_total", - "Total number of requests received and answered" - ), - buckets: prometheus_endpoint::exponential_buckets(0.001, 2.0, 16) - .expect("parameters are always valid values; qed"), - }, - &["protocol"] - )?, registry)?, - requests_out_finished: register(HistogramVec::new( - HistogramOpts { - common_opts: Opts::new( - "sub_libp2p_requests_out_finished", - "Time between a request's start and finish (successful or not)" - ), - buckets: prometheus_endpoint::exponential_buckets(0.001, 2.0, 16) - .expect("parameters are always valid values; qed"), - }, - &["protocol"] - )?, registry)?, - requests_out_started_total: register(CounterVec::new( - Opts::new( - "sub_libp2p_requests_out_started_total", - "Total number of requests emitted" - ), - &["protocol"] - )?, registry)?, - }) - } -} - impl Future for NetworkWorker { type Output = (); @@ -1376,7 +1224,22 @@ impl Future for NetworkWorker { } } + // At the time of writing of this comment, due to a high volume of messages, the network + // worker sometimes takes a long time to 
process the loop below. When that happens, the + // rest of the polling is frozen. In order to avoid negative side-effects caused by this + // freeze, a limit to the number of iterations is enforced below. If the limit is reached, + // the task is interrupted then scheduled again. + // + // This allows for a more even distribution in the time taken by each sub-part of the + // polling. + let mut num_iterations = 0; loop { + num_iterations += 1; + if num_iterations >= 100 { + cx.waker().wake_by_ref(); + break; + } + // Process the next message coming from the `NetworkService`. let msg = match this.from_service.poll_next_unpin(cx) { Poll::Ready(Some(msg)) => msg, @@ -1403,6 +1266,31 @@ impl Future for NetworkWorker { this.network_service.user_protocol_mut().set_sync_fork_request(peer_ids, &hash, number), ServiceToWorkerMsg::EventStream(sender) => this.event_streams.push(sender), + ServiceToWorkerMsg::Request { target, protocol, request, pending_response } => { + // Calling `send_request` can fail immediately in some circumstances. + // This is handled by sending back an error on the channel. + match this.network_service.send_request(&target, &protocol, request) { + Ok(request_id) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.requests_out_started_total + .with_label_values(&[&protocol]) + .inc(); + } + this.pending_requests.insert( + request_id, + (pending_response, Instant::now(), protocol.to_string()) + ); + }, + Err(behaviour::SendRequestError::NotConnected) => { + let err = RequestFailure::Network(OutboundFailure::ConnectionClosed); + let _ = pending_response.send(Err(err)); + }, + Err(behaviour::SendRequestError::UnknownProtocol) => { + let err = RequestFailure::Network(OutboundFailure::UnsupportedProtocols); + let _ = pending_response.send(Err(err)); + }, + } + }, ServiceToWorkerMsg::RegisterNotifProtocol { engine_id, protocol_name } => { this.network_service .register_notifications_protocol(engine_id, protocol_name); @@ -1416,7 +1304,16 @@ impl Future for NetworkWorker { } } + // `num_iterations` serves the same purpose as in the previous loop. + // See the previous loop for explanations. + let mut num_iterations = 0; loop { + num_iterations += 1; + if num_iterations >= 1000 { + cx.waker().wake_by_ref(); + break; + } + // Process the next action coming from the network. let next_event = this.network_service.next_event(); futures::pin_mut!(next_event); @@ -1442,31 +1339,82 @@ impl Future for NetworkWorker { } this.import_queue.import_finality_proof(origin, hash, nb, proof); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::AnsweredRequest { protocol, build_time, .. })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, .. 
})) => { if let Some(metrics) = this.metrics.as_ref() { - metrics.requests_in_total - .with_label_values(&[&maybe_utf8_bytes_to_string(&protocol)]) - .observe(build_time.as_secs_f64()); + match result { + Ok(serve_time) => { + metrics.requests_in_success_total + .with_label_values(&[&protocol]) + .observe(serve_time.as_secs_f64()); + } + Err(err) => { + let reason = match err { + ResponseFailure::Busy => "busy", + ResponseFailure::Network(InboundFailure::Timeout) => "timeout", + ResponseFailure::Network(InboundFailure::UnsupportedProtocols) => + "unsupported", + ResponseFailure::Network(InboundFailure::ConnectionClosed) => + "connection-closed", + }; + + metrics.requests_in_failure_total + .with_label_values(&[&protocol, reason]) + .inc(); + } + } } }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RequestStarted { protocol, .. })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RequestFinished { request_id, result })) => { + if let Some((send_back, started, protocol)) = this.pending_requests.remove(&request_id) { + if let Some(metrics) = this.metrics.as_ref() { + match &result { + Ok(_) => { + metrics.requests_out_success_total + .with_label_values(&[&protocol]) + .observe(started.elapsed().as_secs_f64()); + } + Err(err) => { + let reason = match err { + RequestFailure::Refused => "refused", + RequestFailure::Network(OutboundFailure::DialFailure) => + "dial-failure", + RequestFailure::Network(OutboundFailure::Timeout) => + "timeout", + RequestFailure::Network(OutboundFailure::ConnectionClosed) => + "connection-closed", + RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => + "unsupported", + }; + + metrics.requests_out_failure_total + .with_label_values(&[&protocol, reason]) + .inc(); + } + } + } + let _ = send_back.send(result); + } else { + error!("Request not in pending_requests"); + } + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::OpaqueRequestStarted { protocol, .. })) => { if let Some(metrics) = this.metrics.as_ref() { metrics.requests_out_started_total - .with_label_values(&[&maybe_utf8_bytes_to_string(&protocol)]) + .with_label_values(&[&protocol]) .inc(); } }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RequestFinished { protocol, request_duration, .. })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::OpaqueRequestFinished { protocol, request_duration, .. })) => { if let Some(metrics) = this.metrics.as_ref() { - metrics.requests_out_finished - .with_label_values(&[&maybe_utf8_bytes_to_string(&protocol)]) + metrics.requests_out_success_total + .with_label_values(&[&protocol]) .observe(request_duration.as_secs_f64()); } }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted(protocol))) => { if let Some(metrics) = this.metrics.as_ref() { metrics.kademlia_random_queries_total - .with_label_values(&[&maybe_utf8_bytes_to_string(protocol.as_bytes())]) + .with_label_values(&[&protocol.as_ref()]) .inc(); } }, @@ -1581,18 +1529,19 @@ impl Future for NetworkWorker { ConnectedPoint::Listener { .. 
} => "in", }; let reason = match cause { - ConnectionError::IO(_) => "transport-error", - ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::A(EitherError::B( - EitherError::A(PingFailure::Timeout)))))))) => "ping-timeout", - ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::A(EitherError::A( - NotifsHandlerError::Legacy(LegacyConnectionKillError)))))))) => "force-closed", - ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::A(EitherError::A( - NotifsHandlerError::SyncNotificationsClogged))))))) => "sync-notifications-clogged", - ConnectionError::Handler(NodeHandlerWrapperError::Handler(_)) => "protocol-error", - ConnectionError::Handler(NodeHandlerWrapperError::KeepAliveTimeout) => "keep-alive-timeout", + Some(ConnectionError::IO(_)) => "transport-error", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( + EitherError::A(EitherError::A(EitherError::A(EitherError::B( + EitherError::A(PingFailure::Timeout)))))))))) => "ping-timeout", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( + EitherError::A(EitherError::A(EitherError::A(EitherError::A( + NotifsHandlerError::Legacy(LegacyConnectionKillError)))))))))) => "force-closed", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( + EitherError::A(EitherError::A(EitherError::A(EitherError::A( + NotifsHandlerError::SyncNotificationsClogged))))))))) => "sync-notifications-clogged", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(_))) => "protocol-error", + Some(ConnectionError::Handler(NodeHandlerWrapperError::KeepAliveTimeout)) => "keep-alive-timeout", + None => "actively-closed", }; metrics.connections_closed_total.with_label_values(&[direction, reason]).inc(); @@ -1721,22 +1670,19 @@ impl Future for NetworkWorker { this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed); if let Some(metrics) = this.metrics.as_ref() { - metrics.network_bytes_total.with_label_values(&["in"]).set(this.service.bandwidth.total_inbound()); - metrics.network_bytes_total.with_label_values(&["out"]).set(this.service.bandwidth.total_outbound()); - metrics.is_major_syncing.set(is_major_syncing as u64); - for (proto, num_entries) in this.network_service.num_kbuckets_entries() { - let proto = maybe_utf8_bytes_to_string(proto.as_bytes()); - metrics.kbuckets_num_nodes.with_label_values(&[&proto]).set(num_entries as u64); + for (proto, buckets) in this.network_service.num_entries_per_kbucket() { + for (lower_ilog2_bucket_bound, num_entries) in buckets { + metrics.kbuckets_num_nodes + .with_label_values(&[&proto.as_ref(), &lower_ilog2_bucket_bound.to_string()]) + .set(num_entries as u64); + } } for (proto, num_entries) in this.network_service.num_kademlia_records() { - let proto = maybe_utf8_bytes_to_string(proto.as_bytes()); - metrics.kademlia_records_count.with_label_values(&[&proto]).set(num_entries as u64); + metrics.kademlia_records_count.with_label_values(&[&proto.as_ref()]).set(num_entries as u64); } for (proto, num_entries) in this.network_service.kademlia_records_total_size() { - let proto = maybe_utf8_bytes_to_string(proto.as_bytes()); - metrics.kademlia_records_sizes_total.with_label_values(&[&proto]).set(num_entries as u64); + 
metrics.kademlia_records_sizes_total.with_label_values(&[&proto.as_ref()]).set(num_entries as u64); } - metrics.peers_count.set(num_connected_peers as u64); metrics.peerset_num_discovered.set(this.network_service.user_protocol().num_discovered_peers() as u64); metrics.peerset_num_requested.set(this.network_service.user_protocol().requested_peers().count() as u64); metrics.pending_connections.set(Swarm::network_info(&this.network_service).num_connections_pending as u64); @@ -1752,7 +1698,7 @@ impl Unpin for NetworkWorker { /// Turns bytes that are potentially UTF-8 into a reasonable representable string. /// /// Meant to be used only for debugging or metrics-reporting purposes. -fn maybe_utf8_bytes_to_string(id: &[u8]) -> Cow { +pub(crate) fn maybe_utf8_bytes_to_string(id: &[u8]) -> Cow { if let Ok(s) = std::str::from_utf8(&id[..]) { Cow::Borrowed(s) } else { diff --git a/client/network/src/service/metrics.rs b/client/network/src/service/metrics.rs new file mode 100644 index 0000000000000000000000000000000000000000..a63ce7a18a519d1a816eb08b12ecf70a64fef9ab --- /dev/null +++ b/client/network/src/service/metrics.rs @@ -0,0 +1,357 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::transport::BandwidthSinks; +use prometheus_endpoint::{ + self as prometheus, + Counter, CounterVec, Gauge, GaugeVec, HistogramOpts, + PrometheusError, Registry, U64, Opts, + SourcedCounter, SourcedGauge, MetricSource, +}; +use std::{ + str, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, + }, +}; + +pub use prometheus_endpoint::{Histogram, HistogramVec}; + +/// Registers all networking metrics with the given registry. +pub fn register(registry: &Registry, sources: MetricSources) -> Result { + BandwidthCounters::register(registry, sources.bandwidth)?; + MajorSyncingGauge::register(registry, sources.major_syncing)?; + NumConnectedGauge::register(registry, sources.connected_peers)?; + Metrics::register(registry) +} + +/// Predefined metric sources that are fed directly into prometheus. +pub struct MetricSources { + pub bandwidth: Arc, + pub major_syncing: Arc, + pub connected_peers: Arc, +} + +/// Dedicated metrics. 
+pub struct Metrics { + // This list is ordered alphabetically + pub connections_closed_total: CounterVec, + pub connections_opened_total: CounterVec, + pub distinct_peers_connections_closed_total: Counter, + pub distinct_peers_connections_opened_total: Counter, + pub import_queue_blocks_submitted: Counter, + pub import_queue_finality_proofs_submitted: Counter, + pub import_queue_justifications_submitted: Counter, + pub incoming_connections_errors_total: CounterVec, + pub incoming_connections_total: Counter, + pub issued_light_requests: Counter, + pub kademlia_query_duration: HistogramVec, + pub kademlia_random_queries_total: CounterVec, + pub kademlia_records_count: GaugeVec, + pub kademlia_records_sizes_total: GaugeVec, + pub kbuckets_num_nodes: GaugeVec, + pub listeners_local_addresses: Gauge, + pub listeners_errors_total: Counter, + pub notifications_sizes: HistogramVec, + pub notifications_streams_closed_total: CounterVec, + pub notifications_streams_opened_total: CounterVec, + pub peerset_num_discovered: Gauge, + pub peerset_num_requested: Gauge, + pub pending_connections: Gauge, + pub pending_connections_errors_total: CounterVec, + pub requests_in_failure_total: CounterVec, + pub requests_in_success_total: HistogramVec, + pub requests_out_failure_total: CounterVec, + pub requests_out_success_total: HistogramVec, + pub requests_out_started_total: CounterVec, +} + +impl Metrics { + fn register(registry: &Registry) -> Result { + Ok(Self { + // This list is ordered alphabetically + connections_closed_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_connections_closed_total", + "Total number of connections closed, by direction and reason" + ), + &["direction", "reason"] + )?, registry)?, + connections_opened_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_connections_opened_total", + "Total number of connections opened by direction" + ), + &["direction"] + )?, registry)?, + distinct_peers_connections_closed_total: prometheus::register(Counter::new( + "sub_libp2p_distinct_peers_connections_closed_total", + "Total number of connections closed with distinct peers" + )?, registry)?, + distinct_peers_connections_opened_total: prometheus::register(Counter::new( + "sub_libp2p_distinct_peers_connections_opened_total", + "Total number of connections opened with distinct peers" + )?, registry)?, + import_queue_blocks_submitted: prometheus::register(Counter::new( + "import_queue_blocks_submitted", + "Number of blocks submitted to the import queue.", + )?, registry)?, + import_queue_finality_proofs_submitted: prometheus::register(Counter::new( + "import_queue_finality_proofs_submitted", + "Number of finality proofs submitted to the import queue.", + )?, registry)?, + import_queue_justifications_submitted: prometheus::register(Counter::new( + "import_queue_justifications_submitted", + "Number of justifications submitted to the import queue.", + )?, registry)?, + incoming_connections_errors_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_incoming_connections_handshake_errors_total", + "Total number of incoming connections that have failed during the \ + initial handshake" + ), + &["reason"] + )?, registry)?, + incoming_connections_total: prometheus::register(Counter::new( + "sub_libp2p_incoming_connections_total", + "Total number of incoming connections on the listening sockets" + )?, registry)?, + issued_light_requests: prometheus::register(Counter::new( + "issued_light_requests", + "Number of light client requests that our node 
has issued.", + )?, registry)?, + kademlia_query_duration: prometheus::register(HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "sub_libp2p_kademlia_query_duration", + "Duration of Kademlia queries per query type" + ), + buckets: prometheus::exponential_buckets(0.5, 2.0, 10) + .expect("parameters are always valid values; qed"), + }, + &["type"] + )?, registry)?, + kademlia_random_queries_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_kademlia_random_queries_total", + "Number of random Kademlia queries started" + ), + &["protocol"] + )?, registry)?, + kademlia_records_count: prometheus::register(GaugeVec::new( + Opts::new( + "sub_libp2p_kademlia_records_count", + "Number of records in the Kademlia records store" + ), + &["protocol"] + )?, registry)?, + kademlia_records_sizes_total: prometheus::register(GaugeVec::new( + Opts::new( + "sub_libp2p_kademlia_records_sizes_total", + "Total size of all the records in the Kademlia records store" + ), + &["protocol"] + )?, registry)?, + kbuckets_num_nodes: prometheus::register(GaugeVec::new( + Opts::new( + "sub_libp2p_kbuckets_num_nodes", + "Number of nodes per kbucket per Kademlia instance" + ), + &["protocol", "lower_ilog2_bucket_bound"] + )?, registry)?, + listeners_local_addresses: prometheus::register(Gauge::new( + "sub_libp2p_listeners_local_addresses", "Number of local addresses we're listening on" + )?, registry)?, + listeners_errors_total: prometheus::register(Counter::new( + "sub_libp2p_listeners_errors_total", + "Total number of non-fatal errors reported by a listener" + )?, registry)?, + notifications_sizes: prometheus::register(HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "sub_libp2p_notifications_sizes", + "Sizes of the notifications send to and received from all nodes" + ), + buckets: prometheus::exponential_buckets(64.0, 4.0, 8) + .expect("parameters are always valid values; qed"), + }, + &["direction", "protocol"] + )?, registry)?, + notifications_streams_closed_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_notifications_streams_closed_total", + "Total number of notification substreams that have been closed" + ), + &["protocol"] + )?, registry)?, + notifications_streams_opened_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_notifications_streams_opened_total", + "Total number of notification substreams that have been opened" + ), + &["protocol"] + )?, registry)?, + peerset_num_discovered: prometheus::register(Gauge::new( + "sub_libp2p_peerset_num_discovered", "Number of nodes stored in the peerset manager", + )?, registry)?, + peerset_num_requested: prometheus::register(Gauge::new( + "sub_libp2p_peerset_num_requested", "Number of nodes that the peerset manager wants us to be connected to", + )?, registry)?, + pending_connections: prometheus::register(Gauge::new( + "sub_libp2p_pending_connections", + "Number of connections in the process of being established", + )?, registry)?, + pending_connections_errors_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_pending_connections_errors_total", + "Total number of pending connection errors" + ), + &["reason"] + )?, registry)?, + requests_in_failure_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_requests_in_failure_total", + "Total number of incoming requests that the node has failed to answer" + ), + &["protocol", "reason"] + )?, registry)?, + requests_in_success_total: prometheus::register(HistogramVec::new( + HistogramOpts { + 
common_opts: Opts::new( + "sub_libp2p_requests_in_success_total", + "Total number of requests received and answered" + ), + buckets: prometheus::exponential_buckets(0.001, 2.0, 16) + .expect("parameters are always valid values; qed"), + }, + &["protocol"] + )?, registry)?, + requests_out_failure_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_requests_out_failure_total", + "Total number of requests that have failed" + ), + &["protocol", "reason"] + )?, registry)?, + requests_out_success_total: prometheus::register(HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "sub_libp2p_requests_out_success_total", + "For successful requests, time between a request's start and finish" + ), + buckets: prometheus::exponential_buckets(0.001, 2.0, 16) + .expect("parameters are always valid values; qed"), + }, + &["protocol"] + )?, registry)?, + requests_out_started_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_requests_out_started_total", + "Total number of requests emitted" + ), + &["protocol"] + )?, registry)?, + }) + } +} + +/// The bandwidth counter metric. +#[derive(Clone)] +pub struct BandwidthCounters(Arc); + +impl BandwidthCounters { + /// Registers the `BandwidthCounters` metric whose values are + /// obtained from the given sinks. + fn register(registry: &Registry, sinks: Arc) -> Result<(), PrometheusError> { + prometheus::register(SourcedCounter::new( + &Opts::new( + "sub_libp2p_network_bytes_total", + "Total bandwidth usage" + ).variable_label("direction"), + BandwidthCounters(sinks), + )?, registry)?; + + Ok(()) + } +} + +impl MetricSource for BandwidthCounters { + type N = u64; + + fn collect(&self, mut set: impl FnMut(&[&str], Self::N)) { + set(&[&"in"], self.0.total_inbound()); + set(&[&"out"], self.0.total_outbound()); + } +} + +/// The "major syncing" metric. +#[derive(Clone)] +pub struct MajorSyncingGauge(Arc); + +impl MajorSyncingGauge { + /// Registers the `MajorSyncGauge` metric whose value is + /// obtained from the given `AtomicBool`. + fn register(registry: &Registry, value: Arc) -> Result<(), PrometheusError> { + prometheus::register(SourcedGauge::new( + &Opts::new( + "sub_libp2p_is_major_syncing", + "Whether the node is performing a major sync or not.", + ), + MajorSyncingGauge(value), + )?, registry)?; + + Ok(()) + } +} + +impl MetricSource for MajorSyncingGauge { + type N = u64; + + fn collect(&self, mut set: impl FnMut(&[&str], Self::N)) { + set(&[], self.0.load(Ordering::Relaxed) as u64); + } +} + +/// The connected peers metric. +#[derive(Clone)] +pub struct NumConnectedGauge(Arc); + +impl NumConnectedGauge { + /// Registers the `MajorSyncingGauge` metric whose value is + /// obtained from the given `AtomicUsize`. 
+ fn register(registry: &Registry, value: Arc) -> Result<(), PrometheusError> { + prometheus::register(SourcedGauge::new( + &Opts::new( + "sub_libp2p_peers_count", + "Number of connected peers", + ), + NumConnectedGauge(value), + )?, registry)?; + + Ok(()) + } +} + +impl MetricSource for NumConnectedGauge { + type N = u64; + + fn collect(&self, mut set: impl FnMut(&[&str], Self::N)) { + set(&[], self.0.load(Ordering::Relaxed) as u64); + } +} diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 031d1641d234ebf0da1154d41c75a1c86f462326..4b6f9dd15648265c68b3377598d1b5c9f3392017 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -101,7 +101,7 @@ fn build_test_full_node(config: config::NetworkConfiguration) finality_proof_request_builder: None, on_demand: None, transaction_pool: Arc::new(crate::config::EmptyTransactionPool), - protocol_id: config::ProtocolId::from(&b"/test-protocol-name"[..]), + protocol_id: config::ProtocolId::from("/test-protocol-name"), import_queue, block_announce_validator: Box::new( sp_consensus::block_validation::DefaultBlockAnnounceValidator, @@ -131,14 +131,14 @@ fn build_nodes_one_proto() let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from(&b"/foo"[..]))], + notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, .. config::NetworkConfiguration::new_local() }); let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from(&b"/foo"[..]))], + notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], listen_addresses: vec![], reserved_nodes: vec![config::MultiaddrWithPeerId { multiaddr: listen_addr, @@ -281,7 +281,7 @@ fn lots_of_incoming_peers_works() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (main_node, _) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from(&b"/foo"[..]))], + notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], listen_addresses: vec![listen_addr.clone()], in_peers: u32::max_value(), transport: config::TransportConfig::MemoryOnly, @@ -298,7 +298,7 @@ fn lots_of_incoming_peers_works() { let main_node_peer_id = main_node_peer_id.clone(); let (_dialing_node, event_stream) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from(&b"/foo"[..]))], + notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], listen_addresses: vec![], reserved_nodes: vec![config::MultiaddrWithPeerId { multiaddr: listen_addr.clone(), @@ -346,9 +346,7 @@ fn lots_of_incoming_peers_works() { }); } -// TODO: this test is at the moment ignored because of https://github.com/paritytech/substrate/issues/6766 #[test] -#[ignore] fn notifications_back_pressure() { // Node 1 floods node 2 with notifications. Random sleeps are done on node 2 to simulate the // node being busy. We make sure that all notifications are received. 
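Tying the `service.rs` additions above together, here is a hedged sketch of what issuing a request looks like from calling code. The protocol name `/myproto/req/1` and the variables `network` and `peer` are illustrative; the protocol is assumed to have been registered via `NetworkConfiguration::request_response_protocols` at startup, and `RequestFailure` is the error type re-exported by the service module above.

// Illustrative sketch only; `network` is assumed to be an `Arc<NetworkService<B, H>>`
// and `peer` a `PeerId` we are currently connected to.
match network.request(peer, "/myproto/req/1", b"payload".to_vec()).await {
    Ok(response) => {
        // `response` contains the raw bytes sent back by the remote.
        log::debug!("received {} response bytes", response.len());
    }
    // The remote closed the substream without answering: the request was understood
    // but the peer declined to answer it.
    Err(RequestFailure::Refused) => log::debug!("peer refused the request"),
    // Networking-level failures (timeout, closed connection, unsupported protocol, ...).
    Err(RequestFailure::Network(err)) => log::debug!("request failed: {:?}", err),
}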
diff --git a/client/network/src/transport.rs b/client/network/src/transport.rs index e8836c4c269a525b04ed08c78e504ec51f03cb95..626f84b6b5f018b167cab201cc5a2c2cc7923c92 100644 --- a/client/network/src/transport.rs +++ b/client/network/src/transport.rs @@ -16,11 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use futures::prelude::*; use libp2p::{ InboundUpgradeExt, OutboundUpgradeExt, PeerId, Transport, core::{ - self, either::{EitherError, EitherOutput}, muxing::StreamMuxerBox, + self, either::EitherOutput, muxing::StreamMuxerBox, transport::{boxed::Boxed, OptionalTransport}, upgrade }, mplex, identity, bandwidth, wasm_ext, noise @@ -44,46 +43,6 @@ pub fn build_transport( wasm_external_transport: Option, use_yamux_flow_control: bool ) -> (Boxed<(PeerId, StreamMuxerBox), io::Error>, Arc) { - // Legacy noise configurations for backward compatibility. - let mut noise_legacy = noise::LegacyConfig::default(); - noise_legacy.send_legacy_handshake = true; - - // Build configuration objects for encryption mechanisms. - let noise_config = { - // For more information about these two panics, see in "On the Importance of - // Checking Cryptographic Protocols for Faults" by Dan Boneh, Richard A. DeMillo, - // and Richard J. Lipton. - let noise_keypair_legacy = noise::Keypair::::new().into_authentic(&keypair) - .expect("can only fail in case of a hardware bug; since this signing is performed only \ - once and at initialization, we're taking the bet that the inconvenience of a very \ - rare panic here is basically zero"); - let noise_keypair_spec = noise::Keypair::::new().into_authentic(&keypair) - .expect("can only fail in case of a hardware bug; since this signing is performed only \ - once and at initialization, we're taking the bet that the inconvenience of a very \ - rare panic here is basically zero"); - - let mut xx_config = noise::NoiseConfig::xx(noise_keypair_spec); - xx_config.set_legacy_config(noise_legacy.clone()); - let mut ix_config = noise::NoiseConfig::ix(noise_keypair_legacy); - ix_config.set_legacy_config(noise_legacy); - - core::upgrade::SelectUpgrade::new(xx_config, ix_config) - }; - - // Build configuration objects for multiplexing mechanisms. - let mut mplex_config = mplex::MplexConfig::new(); - mplex_config.max_buffer_len_behaviour(mplex::MaxBufferBehaviour::Block); - mplex_config.max_buffer_len(usize::MAX); - - let mut yamux_config = libp2p::yamux::Config::default(); - yamux_config.set_lazy_open(true); // Only set SYN flag on first data frame sent to the remote. - - if use_yamux_flow_control { - // Enable proper flow-control: window updates are only sent when - // buffered data has been consumed. - yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::OnRead); - } - // Build the base layer of the transport. 
let transport = if let Some(t) = wasm_external_transport { OptionalTransport::some(t) @@ -112,45 +71,63 @@ pub fn build_transport( let (transport, bandwidth) = bandwidth::BandwidthLogging::new(transport); - // Encryption - let transport = transport.and_then(move |stream, endpoint| { - core::upgrade::apply(stream, noise_config, endpoint, upgrade::Version::V1) - .map_err(|err| - err.map_err(|err| match err { - EitherError::A(err) => err, - EitherError::B(err) => err, - }) - ) - .and_then(|result| async move { - let remote_key = match &result { - EitherOutput::First((noise::RemoteIdentity::IdentityKey(key), _)) => key.clone(), - EitherOutput::Second((noise::RemoteIdentity::IdentityKey(key), _)) => key.clone(), - _ => return Err(upgrade::UpgradeError::Apply(noise::NoiseError::InvalidKey)) - }; - let out = match result { - EitherOutput::First((_, o)) => o, - EitherOutput::Second((_, o)) => o, - }; - Ok((out, remote_key.into_peer_id())) - }) - }); + let authentication_config = { + // For more information about these two panics, see in "On the Importance of + // Checking Cryptographic Protocols for Faults" by Dan Boneh, Richard A. DeMillo, + // and Richard J. Lipton. + let noise_keypair_legacy = noise::Keypair::::new().into_authentic(&keypair) + .expect("can only fail in case of a hardware bug; since this signing is performed only \ + once and at initialization, we're taking the bet that the inconvenience of a very \ + rare panic here is basically zero"); + let noise_keypair_spec = noise::Keypair::::new().into_authentic(&keypair) + .expect("can only fail in case of a hardware bug; since this signing is performed only \ + once and at initialization, we're taking the bet that the inconvenience of a very \ + rare panic here is basically zero"); + + // Legacy noise configurations for backward compatibility. + let mut noise_legacy = noise::LegacyConfig::default(); + noise_legacy.send_legacy_handshake = true; + noise_legacy.recv_legacy_handshake = true; - // Multiplexing - let transport = transport.and_then(move |(stream, peer_id), endpoint| { - let peer_id2 = peer_id.clone(); - let upgrade = core::upgrade::SelectUpgrade::new(yamux_config, mplex_config) - .map_inbound(move |muxer| (peer_id, muxer)) - .map_outbound(move |muxer| (peer_id2, muxer)); + let mut xx_config = noise::NoiseConfig::xx(noise_keypair_spec); + xx_config.set_legacy_config(noise_legacy.clone()); + let mut ix_config = noise::NoiseConfig::ix(noise_keypair_legacy); + ix_config.set_legacy_config(noise_legacy); + + let extract_peer_id = |result| match result { + EitherOutput::First((peer_id, o)) => (peer_id, EitherOutput::First(o)), + EitherOutput::Second((peer_id, o)) => (peer_id, EitherOutput::Second(o)), + }; + + core::upgrade::SelectUpgrade::new(xx_config.into_authenticated(), ix_config.into_authenticated()) + .map_inbound(extract_peer_id) + .map_outbound(extract_peer_id) + }; + + let multiplexing_config = { + let mut mplex_config = mplex::MplexConfig::new(); + mplex_config.max_buffer_len_behaviour(mplex::MaxBufferBehaviour::Block); + mplex_config.max_buffer_len(usize::MAX); + + let mut yamux_config = libp2p::yamux::Config::default(); - core::upgrade::apply(stream, upgrade, endpoint, upgrade::Version::V1) - .map_ok(|(id, muxer)| (id, core::muxing::StreamMuxerBox::new(muxer))) - }); + if use_yamux_flow_control { + // Enable proper flow-control: window updates are only sent when + // buffered data has been consumed. 
+ yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::OnRead); + } - let transport = transport - .timeout(Duration::from_secs(20)) - .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) - .boxed(); + core::upgrade::SelectUpgrade::new(yamux_config, mplex_config) + .map_inbound(move |muxer| core::muxing::StreamMuxerBox::new(muxer)) + .map_outbound(move |muxer| core::muxing::StreamMuxerBox::new(muxer)) + }; + + let transport = transport.upgrade(upgrade::Version::V1) + .authenticate(authentication_config) + .multiplex(multiplexing_config) + .timeout(Duration::from_secs(20)) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) + .boxed(); (transport, bandwidth) } - diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 7c157ce1c608e2d3d5f25b0a3e0fa767092a71f9..26e1631d9f1aad9accc5ca74e3aa9a8aaafe75a1 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Integration tests for Substrate network protocol" name = "sc-network-test" -version = "0.8.0-rc5" +version = "0.8.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -13,23 +13,23 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-network = { version = "0.8.0-rc5", path = "../" } +sc-network = { version = "0.8.0", path = "../" } log = "0.4.8" parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" rand = "0.7.2" -libp2p = { version = "0.23.0", default-features = false } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sc-consensus = { version = "0.8.0-rc5", path = "../../../client/consensus/common" } -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../block-builder" } -sp-consensus-babe = { version = "0.8.0-rc5", path = "../../../primitives/consensus/babe" } -env_logger = "0.7.0" -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } -substrate-test-runtime = { version = "2.0.0-rc5", path = "../../../test-utils/runtime" } +libp2p = { version = "0.28.1", default-features = false } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } +sc-client-api = { version = "2.0.0", path = "../../api" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sc-block-builder = { version = "0.8.0", path = "../../block-builder" } +sp-consensus-babe = { version = "0.8.0", path = "../../../primitives/consensus/babe" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } +substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } tempfile = "3.1.0" -sc-service = { version = "0.8.0-rc5", default-features = false, features = ["test-helpers"], path = "../../service" } +sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } +sc-service = { version = "0.8.0", 
default-features = false, features = ["test-helpers"], path = "../../service" } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 35587cbdc08b3d609ad63a648714d446982a34eb..587feebe55c148c86c16869e81e6c849b9acf9ce 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -22,7 +22,10 @@ mod block_import; #[cfg(test)] mod sync; -use std::{collections::HashMap, pin::Pin, sync::Arc, marker::PhantomData, task::{Poll, Context as FutureContext}}; +use std::{ + borrow::Cow, collections::HashMap, pin::Pin, sync::Arc, marker::PhantomData, + task::{Poll, Context as FutureContext} +}; use libp2p::build_multiaddr; use log::trace; @@ -55,7 +58,7 @@ use sp_core::H256; use sc_network::config::ProtocolConfig; use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use sp_runtime::Justification; +use sp_runtime::{ConsensusEngineId, Justification}; use substrate_test_runtime_client::{self, AccountKeyring}; use sc_service::client::Client; pub use sc_network::config::EmptyTransactionPool; @@ -553,6 +556,8 @@ pub struct FullPeerConfig { pub keep_blocks: Option, /// Block announce validator. pub block_announce_validator: Option + Send + Sync>>, + /// List of notification protocols that the network must support. + pub notifications_protocols: Vec<(ConsensusEngineId, Cow<'static, str>)>, } pub trait TestNetFactory: Sized { @@ -663,6 +668,7 @@ pub trait TestNetFactory: Sized { network_config.transport = TransportConfig::MemoryOnly; network_config.listen_addresses = vec![listen_addr.clone()]; network_config.allow_non_globals_in_dht = true; + network_config.notifications_protocols = config.notifications_protocols; let network = NetworkWorker::new(sc_network::config::Params { role: Role::Full, @@ -675,7 +681,7 @@ pub trait TestNetFactory: Sized { finality_proof_request_builder, on_demand: None, transaction_pool: Arc::new(EmptyTransactionPool), - protocol_id: ProtocolId::from(&b"test-protocol-name"[..]), + protocol_id: ProtocolId::from("test-protocol-name"), import_queue, block_announce_validator: config.block_announce_validator .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)), @@ -755,7 +761,7 @@ pub trait TestNetFactory: Sized { finality_proof_request_builder, on_demand: None, transaction_pool: Arc::new(EmptyTransactionPool), - protocol_id: ProtocolId::from(&b"test-protocol-name"[..]), + protocol_id: ProtocolId::from("test-protocol-name"), import_queue, block_announce_validator: Box::new(DefaultBlockAnnounceValidator), metrics_registry: None, diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 1cf2a8fee37982495811a2bd464bb0eb5644b728..86e274aae10ebbaec18d9e23c3388807ae0170fc 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -24,7 +24,7 @@ use sp_consensus::block_validation::Validation; use substrate_test_runtime::Header; fn test_ancestor_search_when_common_is(n: usize) { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(3); net.peer(0).push_blocks(n, false); @@ -42,7 +42,7 @@ fn test_ancestor_search_when_common_is(n: usize) { #[test] fn sync_peers_works() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(3); block_on(futures::future::poll_fn::<(), _>(|cx| { @@ -58,7 +58,7 @@ fn sync_peers_works() { #[test] fn sync_cycle_from_offline_to_syncing_to_offline() { - let _ = ::env_logger::try_init(); + 
sp_tracing::try_init_simple(); let mut net = TestNet::new(3); for peer in 0..3 { // Offline, and not major syncing. @@ -113,7 +113,7 @@ fn sync_cycle_from_offline_to_syncing_to_offline() { #[test] fn syncing_node_not_major_syncing_when_disconnected() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(3); // Generate blocks. @@ -147,7 +147,7 @@ fn syncing_node_not_major_syncing_when_disconnected() { #[test] fn sync_from_two_peers_works() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(3); net.peer(1).push_blocks(100, false); net.peer(2).push_blocks(100, false); @@ -159,7 +159,7 @@ fn sync_from_two_peers_works() { #[test] fn sync_from_two_peers_with_ancestry_search_works() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(3); net.peer(0).push_blocks(10, true); net.peer(1).push_blocks(100, false); @@ -171,7 +171,7 @@ fn sync_from_two_peers_with_ancestry_search_works() { #[test] fn ancestry_search_works_when_backoff_is_one() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(3); net.peer(0).push_blocks(1, false); @@ -185,7 +185,7 @@ fn ancestry_search_works_when_backoff_is_one() { #[test] fn ancestry_search_works_when_ancestor_is_genesis() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(3); net.peer(0).push_blocks(13, true); @@ -214,7 +214,7 @@ fn ancestry_search_works_when_common_is_hundred() { #[test] fn sync_long_chain_works() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(2); net.peer(1).push_blocks(500, false); net.block_until_sync(); @@ -224,7 +224,7 @@ fn sync_long_chain_works() { #[test] fn sync_no_common_longer_chain_fails() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(3); net.peer(0).push_blocks(20, true); net.peer(1).push_blocks(20, false); @@ -242,7 +242,7 @@ fn sync_no_common_longer_chain_fails() { #[test] fn sync_justifications() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = JustificationTestNet::new(3); net.peer(0).push_blocks(20, false); net.block_until_sync(); @@ -283,7 +283,7 @@ fn sync_justifications() { #[test] fn sync_justifications_across_forks() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = JustificationTestNet::new(3); // we push 5 blocks net.peer(0).push_blocks(5, false); @@ -315,7 +315,7 @@ fn sync_justifications_across_forks() { #[test] fn sync_after_fork_works() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(3); net.peer(0).push_blocks(30, false); net.peer(1).push_blocks(30, false); @@ -338,7 +338,7 @@ fn sync_after_fork_works() { #[test] fn syncs_all_forks() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(4); net.peer(0).push_blocks(2, false); net.peer(1).push_blocks(2, false); @@ -356,7 +356,7 @@ fn syncs_all_forks() { #[test] fn own_blocks_are_announced() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(3); net.block_until_sync(); // connect'em net.peer(0).generate_blocks(1, BlockOrigin::Own, |builder| builder.build().unwrap().block); @@ -372,7 +372,7 @@ fn own_blocks_are_announced() { #[test] fn blocks_are_not_announced_by_light_nodes() { - let _ = ::env_logger::try_init(); + 
sp_tracing::try_init_simple(); let mut net = TestNet::new(0); // full peer0 is connected to light peer @@ -401,7 +401,7 @@ fn blocks_are_not_announced_by_light_nodes() { #[test] fn can_sync_small_non_best_forks() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(2); net.peer(0).push_blocks(30, false); net.peer(1).push_blocks(30, false); @@ -464,7 +464,7 @@ fn can_sync_small_non_best_forks() { #[test] fn can_not_sync_from_light_peer() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); // given the network with 1 full nodes (#0) and 1 light node (#1) let mut net = TestNet::new(1); @@ -497,7 +497,7 @@ fn can_not_sync_from_light_peer() { #[test] fn light_peer_imports_header_from_announce() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); fn import_with_announce(net: &mut TestNet, hash: H256) { net.peer(0).announce_block(hash, Vec::new()); @@ -530,7 +530,7 @@ fn light_peer_imports_header_from_announce() { #[test] fn can_sync_explicit_forks() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(2); net.peer(0).push_blocks(30, false); net.peer(1).push_blocks(30, false); @@ -584,7 +584,7 @@ fn can_sync_explicit_forks() { #[test] fn syncs_header_only_forks() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(0); net.add_full_peer_with_config(Default::default()); net.add_full_peer_with_config(FullPeerConfig { keep_blocks: Some(3), ..Default::default() }); @@ -602,7 +602,7 @@ fn syncs_header_only_forks() { #[test] fn does_not_sync_announced_old_best_block() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(3); let old_hash = net.peer(0).push_blocks(1, false); @@ -630,7 +630,7 @@ fn does_not_sync_announced_old_best_block() { #[test] fn full_sync_requires_block_body() { // Check that we don't sync headers-only in full mode. 
- let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(2); net.peer(0).push_headers(1); @@ -649,7 +649,7 @@ fn full_sync_requires_block_body() { #[test] fn imports_stale_once() { - let _ = ::env_logger::try_init(); + sp_tracing::try_init_simple(); fn import_with_announce(net: &mut TestNet, hash: H256) { // Announce twice @@ -685,7 +685,7 @@ fn imports_stale_once() { #[test] fn can_sync_to_peers_with_wrong_common_block() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let mut net = TestNet::new(2); net.peer(0).push_blocks(2, true); @@ -727,7 +727,7 @@ impl BlockAnnounceValidator for NewBestBlockAnnounceValidator { #[test] fn sync_blocks_when_block_announce_validator_says_it_is_new_best() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); log::trace!(target: "sync", "Test"); let mut net = TestNet::with_fork_choice(ForkChoiceStrategy::Custom(false)); net.add_full_peer_with_config(Default::default()); diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index ef2b00daab4d3e183107a152f077f29e138a9ad8..5686d33da9b233eb874be1c05d88f36ca70a0f35 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -1,46 +1,47 @@ [package] description = "Substrate offchain workers" name = "sc-offchain" -version = "2.0.0-rc5" +version = "2.0.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = "0.5" -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } +sc-client-api = { version = "2.0.0", path = "../api" } +sp-api = { version = "2.0.0", path = "../../primitives/api" } fnv = "1.0.6" futures = "0.3.4" futures-timer = "3.0.1" log = "0.4.8" threadpool = "1.7" num_cpus = "1.10" -sp-offchain = { version = "2.0.0-rc5", path = "../../primitives/offchain" } +sp-offchain = { version = "2.0.0", path = "../../primitives/offchain" } codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } parking_lot = "0.10.0" -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } rand = "0.7.2" -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils" } -sc-network = { version = "0.8.0-rc5", path = "../network" } -sc-keystore = { version = "2.0.0-rc5", path = "../keystore" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0", path = "../../primitives/utils" } +sc-network = { version = "0.8.0", path = "../network" } +sc-keystore = { version = "2.0.0", path = "../keystore" } [target.'cfg(not(target_os = "unknown"))'.dependencies] hyper = "0.13.2" hyper-rustls = "0.21.0" [dev-dependencies] -env_logger = "0.7.0" -sc-client-db = { version = "0.8.0-rc5", default-features = true, path = "../db/" } -sc-transaction-pool = { version = "2.0.0-rc5", path = "../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transaction-pool" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } +sc-client-db = { version = "0.8.0", default-features = true, path = "../db/" } 
+sc-transaction-pool = { version = "2.0.0", path = "../../client/transaction-pool" } +sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } +sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } tokio = "0.2" lazy_static = "1.4.0" diff --git a/client/offchain/README.md b/client/offchain/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f7c097e8e0b2a9e3ac07bbf0a41359aadf4a758f --- /dev/null +++ b/client/offchain/README.md @@ -0,0 +1,18 @@ +Substrate offchain workers. + +An offchain worker is a special function of the runtime that +gets executed after a block is imported. During execution +it is able to asynchronously submit extrinsics that will either +be propagated to other nodes or added to the next block +produced by the node as unsigned transactions. + +Offchain workers can be used for computation-heavy tasks +that are not feasible for execution during regular block processing. +These can either be tasks for which no consensus is required, +or tasks for which some form of consensus over the data can be built on-chain, +for instance via: +1. Challenge period for incorrect computations +2. Majority voting for results +3. etc. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index 5287ac8251eebf056fae07672441b8420eb3955e..6fb1da19bf058e8071d77525c656ad20a3ee3bde 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -19,16 +19,18 @@ use std::{ sync::Arc, convert::TryFrom, thread::sleep, + collections::HashSet, }; -use sp_core::offchain::OffchainStorage; +use crate::NetworkProvider; use futures::Future; use log::error; -use sc_network::{PeerId, Multiaddr, NetworkStateInfo}; +use sc_network::{PeerId, Multiaddr}; use codec::{Encode, Decode}; +use sp_core::OpaquePeerId; use sp_core::offchain::{ Externalities as OffchainExt, HttpRequestId, Timestamp, HttpRequestStatus, HttpError, - OpaqueNetworkState, OpaquePeerId, OpaqueMultiaddr, StorageKind, + OffchainStorage, OpaqueNetworkState, OpaqueMultiaddr, StorageKind, }; pub use sp_offchain::STORAGE_PREFIX; pub use http::SharedClient; @@ -49,8 +51,8 @@ mod timestamp; pub(crate) struct Api { /// Offchain Workers database. db: Storage, - /// A NetworkState provider. - network_state: Arc, + /// A provider for substrate networking. + network_provider: Arc, /// Is this node a potential validator? is_validator: bool, /// Everything HTTP-related is handled by a different struct. @@ -73,10 +75,10 @@ impl OffchainExt for Api { } fn network_state(&self) -> Result { - let external_addresses = self.network_state.external_addresses(); + let external_addresses = self.network_provider.external_addresses(); let state = NetworkState::new( - self.network_state.local_peer_id(), + self.network_provider.local_peer_id(), external_addresses, ); Ok(OpaqueNetworkState::from(state)) @@ -180,6 +182,15 @@ impl OffchainExt for Api { ) -> Result { self.http.response_read_body(request_id, buffer, deadline) } + + fn set_authorized_nodes(&mut self, nodes: Vec, authorized_only: bool) { + let peer_ids: HashSet = nodes.into_iter() + .filter_map(|node| PeerId::from_bytes(node.0).ok()) + .collect(); + + self.network_provider.set_authorized_peers(peer_ids); + self.network_provider.set_authorized_only(authorized_only); + } } /// Information about the local node's network state.
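The README introduced above describes what offchain workers do, and the `api.rs` hunk that follows it wires the new `set_authorized_nodes` host call through a `NetworkProvider` handle. The sketch below (not part of the diff) shows a minimal `NetworkProvider` implementation, mirroring the `TestNetwork` doubles added later in this diff; `DummyNetwork` and its no-op behaviour are hypothetical, whereas a real implementation, such as the one added for `NetworkService` in `client/offchain/src/lib.rs` below, forwards these calls to the network.

```rust
use std::collections::HashSet;
use sc_network::{Multiaddr, NetworkStateInfo, PeerId};
use sc_offchain::NetworkProvider;

/// Hypothetical stand-in for a network handle.
struct DummyNetwork {
	peer_id: PeerId,
}

impl NetworkStateInfo for DummyNetwork {
	fn external_addresses(&self) -> Vec<Multiaddr> {
		Vec::new()
	}

	fn local_peer_id(&self) -> PeerId {
		self.peer_id.clone()
	}
}

impl NetworkProvider for DummyNetwork {
	fn set_authorized_peers(&self, _peers: HashSet<PeerId>) {
		// A real implementation would replace the set of peers the node treats as authorized.
	}

	fn set_authorized_only(&self, _reserved_only: bool) {
		// A real implementation would toggle whether only authorized peers are accepted.
	}
}
```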
@@ -256,10 +267,10 @@ pub(crate) struct AsyncApi { } impl AsyncApi { - /// Creates new Offchain extensions API implementation an the asynchronous processing part. + /// Creates a new Offchain extensions API implementation and the asynchronous processing part. pub fn new( db: S, - network_state: Arc, + network_provider: Arc, is_validator: bool, shared_client: SharedClient, ) -> (Api, Self) { @@ -267,7 +278,7 @@ impl AsyncApi { let api = Api { db, - network_state, + network_provider, is_validator, http: http_api, }; @@ -292,11 +303,21 @@ mod tests { use super::*; use std::{convert::{TryFrom, TryInto}, time::SystemTime}; use sc_client_db::offchain::LocalStorage; - use sc_network::PeerId; + use sc_network::{NetworkStateInfo, PeerId}; - struct MockNetworkStateInfo(); + struct TestNetwork(); + + impl NetworkProvider for TestNetwork { + fn set_authorized_peers(&self, _peers: HashSet) { + unimplemented!() + } - impl NetworkStateInfo for MockNetworkStateInfo { + fn set_authorized_only(&self, _reserved_only: bool) { + unimplemented!() + } + } + + impl NetworkStateInfo for TestNetwork { fn external_addresses(&self) -> Vec { Vec::new() } @@ -307,12 +328,11 @@ } fn offchain_api() -> (Api, AsyncApi) { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let db = LocalStorage::new_test(); - let mock = Arc::new(MockNetworkStateInfo()); + let mock = Arc::new(TestNetwork()); let shared_client = SharedClient::new(); - AsyncApi::new( db, mock, diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 3b17c14f19652198abba3267b1f7c4bc1060ba4f..885294449fb950d1d286dea46ae0dbb4c91d380f 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -33,14 +33,17 @@ #![warn(missing_docs)] -use std::{fmt, marker::PhantomData, sync::Arc}; +use std::{ + fmt, marker::PhantomData, sync::Arc, + collections::HashSet, +}; use parking_lot::Mutex; use threadpool::ThreadPool; use sp_api::{ApiExt, ProvideRuntimeApi}; use futures::future::Future; use log::{debug, warn}; -use sc_network::NetworkStateInfo; +use sc_network::{ExHashT, NetworkService, NetworkStateInfo, PeerId}; use sp_core::{offchain::{self, OffchainStorage}, ExecutionContext, traits::SpawnNamed}; use sp_runtime::{generic::BlockId, traits::{self, Header}}; use futures::{prelude::*, future::ready}; @@ -50,6 +53,30 @@ use api::SharedClient; pub use sp_offchain::{OffchainWorkerApi, STORAGE_PREFIX}; +/// NetworkProvider provides [`OffchainWorkers`] with all necessary hooks into the +/// underlying Substrate networking. +pub trait NetworkProvider: NetworkStateInfo { + /// Set the authorized peers. + fn set_authorized_peers(&self, peers: HashSet); + + /// Set the authorized only flag. + fn set_authorized_only(&self, reserved_only: bool); +} + +impl NetworkProvider for NetworkService +where + B: traits::Block + 'static, + H: ExHashT, +{ + fn set_authorized_peers(&self, peers: HashSet) { + self.set_authorized_peers(peers) + } + + fn set_authorized_only(&self, reserved_only: bool) { + self.set_authorized_only(reserved_only) + } +} + /// An offchain workers manager.
pub struct OffchainWorkers { client: Arc, @@ -98,7 +125,7 @@ impl OffchainWorkers< pub fn on_block_imported( &self, header: &Block::Header, - network_state: Arc, + network_provider: Arc, is_validator: bool, ) -> impl Future { let runtime = self.client.runtime_api(); @@ -122,7 +149,7 @@ impl OffchainWorkers< if version > 0 { let (api, runner) = api::AsyncApi::new( self.db.clone(), - network_state.clone(), + network_provider, is_validator, self.shared_client.clone(), ); @@ -173,7 +200,7 @@ pub async fn notification_future( client: Arc, offchain: Arc>, spawner: Spawner, - network_state_info: Arc, + network_provider: Arc, ) where Block: traits::Block, @@ -188,7 +215,7 @@ pub async fn notification_future( "offchain-on-block", offchain.on_block_imported( &n.header, - network_state_info.clone(), + network_provider.clone(), is_validator, ).boxed(), ); @@ -213,9 +240,9 @@ mod tests { use sc_transaction_pool::{BasicPool, FullChainApi}; use sp_transaction_pool::{TransactionPool, InPoolTransaction}; - struct MockNetworkStateInfo(); + struct TestNetwork(); - impl NetworkStateInfo for MockNetworkStateInfo { + impl NetworkStateInfo for TestNetwork { fn external_addresses(&self) -> Vec { Vec::new() } @@ -225,6 +252,16 @@ mod tests { } } + impl NetworkProvider for TestNetwork { + fn set_authorized_peers(&self, _peers: HashSet) { + unimplemented!() + } + + fn set_authorized_only(&self, _reserved_only: bool) { + unimplemented!() + } + } + struct TestPool( Arc, Block>> ); @@ -244,7 +281,7 @@ mod tests { #[test] fn should_call_into_runtime_and_produce_extrinsic() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); @@ -255,12 +292,14 @@ mod tests { client.clone(), )); let db = sc_client_db::offchain::LocalStorage::new_test(); - let network_state = Arc::new(MockNetworkStateInfo()); + let network = Arc::new(TestNetwork()); let header = client.header(&BlockId::number(0)).unwrap().unwrap(); // when let offchain = OffchainWorkers::new(client, db); - futures::executor::block_on(offchain.on_block_imported(&header, network_state, false)); + futures::executor::block_on( + offchain.on_block_imported(&header, network, false) + ); // then assert_eq!(pool.0.status().ready, 1); diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 8a9aa0adb18076f636c24cfdde02c2fda132f46d..40062db8f9b911b44d9c666a026b97149f315cae 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -3,11 +3,12 @@ description = "Connectivity manager based on reputation" homepage = "http://parity.io" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" name = "sc-peerset" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-peerset" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" -libp2p = { version = "0.23.0", default-features = false } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils"} +libp2p = { version = "0.28.1", default-features = false } +sp-utils = { version = "2.0.0", path = "../../primitives/utils"} log = "0.4.8" serde_json = "1.0.41" wasm-timer = "0.2" diff --git a/client/peerset/README.md b/client/peerset/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..1b54c52001caf7b6767983445513fa3545fd26f4 --- /dev/null +++ b/client/peerset/README.md @@ -0,0 +1,4 @@ +Peer Set Manager (PSM). Contains the strategy for choosing which nodes the network should be +connected to. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index 6f28dd036a3cc53408583be8cbe04e24af9a8448..575743afa079c0f3c2ba2ddd1f0bf3bfdc195936 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -45,6 +45,7 @@ const FORGET_AFTER: Duration = Duration::from_secs(3600); enum Action { AddReservedPeer(PeerId), RemoveReservedPeer(PeerId), + SetReservedPeers(HashSet), SetReservedOnly(bool), ReportPeer(PeerId, ReputationChange), SetPriorityGroup(String, HashSet), @@ -102,6 +103,11 @@ impl PeersetHandle { pub fn set_reserved_only(&self, reserved: bool) { let _ = self.tx.unbounded_send(Action::SetReservedOnly(reserved)); } + + /// Set reserved peers to the new set. + pub fn set_reserved_peers(&self, peer_ids: HashSet) { + let _ = self.tx.unbounded_send(Action::SetReservedPeers(peer_ids)); + } /// Reports an adjustment to the reputation of the given peer. pub fn report_peer(&self, peer_id: PeerId, score_diff: ReputationChange) { @@ -246,6 +252,10 @@ impl Peerset { fn on_remove_reserved_peer(&mut self, peer_id: PeerId) { self.on_remove_from_priority_group(RESERVED_NODES, peer_id); } + + fn on_set_reserved_peers(&mut self, peer_ids: HashSet) { + self.on_set_priority_group(RESERVED_NODES, peer_ids); + } fn on_set_reserved_only(&mut self, reserved_only: bool) { self.reserved_only = reserved_only; @@ -655,6 +665,8 @@ impl Stream for Peerset { self.on_add_reserved_peer(peer_id), Action::RemoveReservedPeer(peer_id) => self.on_remove_reserved_peer(peer_id), + Action::SetReservedPeers(peer_ids) => + self.on_set_reserved_peers(peer_ids), Action::SetReservedOnly(reserved) => self.on_set_reserved_only(reserved), Action::ReportPeer(peer_id, score_diff) => diff --git a/client/proposer-metrics/Cargo.toml b/client/proposer-metrics/Cargo.toml index 8427786919f7797545bbd182e8cb396d35950bf7..085f50f5be551ea03d5c5506db45b1c44d9a7c9c 100644 --- a/client/proposer-metrics/Cargo.toml +++ b/client/proposer-metrics/Cargo.toml @@ -1,16 +1,17 @@ [package] name = "sc-proposer-metrics" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Basic metrics for block production." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc5"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} diff --git a/client/proposer-metrics/README.md b/client/proposer-metrics/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9669c7d35191dc6dc6d4c6df5878c6b4c9bea30d --- /dev/null +++ b/client/proposer-metrics/README.md @@ -0,0 +1,3 @@ +Prometheus basic proposer metrics. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 7701befbf71a421b7f5451ac0d0899cf73ff0a04..55eb51d261cdbdb008da2924a235e60c52bde876 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "sc-rpc-api" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate RPC interfaces." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,17 +16,17 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "1.3.4" } derive_more = "0.99.2" futures = { version = "0.3.1", features = ["compat"] } -jsonrpc-core = "14.2.0" -jsonrpc-core-client = "14.2.0" -jsonrpc-derive = "14.2.1" -jsonrpc-pubsub = "14.2.0" +jsonrpc-core = "15.0.0" +jsonrpc-core-client = "15.0.0" +jsonrpc-derive = "15.0.0" +jsonrpc-pubsub = "15.0.0" log = "0.4.8" parking_lot = "0.10.0" -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-version = { version = "2.0.0-rc5", path = "../../primitives/version" } -sp-runtime = { path = "../../primitives/runtime" , version = "2.0.0-rc5"} -sp-chain-spec = { path = "../../primitives/chain-spec" , version = "2.0.0-rc5"} +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-version = { version = "2.0.0", path = "../../primitives/version" } +sp-runtime = { path = "../../primitives/runtime" , version = "2.0.0"} +sp-chain-spec = { path = "../../primitives/chain-spec" , version = "2.0.0"} serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transaction-pool" } -sp-rpc = { version = "2.0.0-rc5", path = "../../primitives/rpc" } +sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } +sp-rpc = { version = "2.0.0", path = "../../primitives/rpc" } diff --git a/client/rpc-api/README.md b/client/rpc-api/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e860e0c2334da50e57bee241b14e2807b25bc7f3 --- /dev/null +++ b/client/rpc-api/README.md @@ -0,0 +1,5 @@ +Substrate RPC interfaces. + +A collection of RPC methods and subscriptions supported by all substrate clients. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/rpc-api/src/chain/mod.rs b/client/rpc-api/src/chain/mod.rs index 753ac5617a29d70380e3502d95a2145af8a926a9..9bb75216c0186a016b8c69741bdbed29ae61c66d 100644 --- a/client/rpc-api/src/chain/mod.rs +++ b/client/rpc-api/src/chain/mod.rs @@ -21,7 +21,6 @@ pub mod error; use jsonrpc_core::Result as RpcResult; -use jsonrpc_core::futures::Future; use jsonrpc_derive::rpc; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; use sp_rpc::{number::NumberOrHex, list::ListOrValue}; diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 1bfbb4786e677bb2eb89cb2b51346f9ae4d56890..874fc862a39d25e958ed09f421d2a6b3181172e0 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -22,7 +22,6 @@ pub mod error; pub mod helpers; use jsonrpc_core::Result as RpcResult; -use jsonrpc_core::futures::Future; use jsonrpc_derive::rpc; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; use sp_core::Bytes; diff --git a/client/rpc-api/src/system/helpers.rs b/client/rpc-api/src/system/helpers.rs index 5dbe93543d8e5313a617a91ccfd3fa919ec661ee..dd3294c243116a084730f252b6f956204d65729d 100644 --- a/client/rpc-api/src/system/helpers.rs +++ b/client/rpc-api/src/system/helpers.rs @@ -67,8 +67,6 @@ pub struct PeerInfo { pub peer_id: String, /// Roles pub roles: String, - /// Protocol version - pub protocol_version: u32, /// Peer best block hash pub best_hash: Hash, /// Peer best block number @@ -110,11 +108,10 @@ mod tests { ::serde_json::to_string(&PeerInfo { peer_id: "2".into(), roles: "a".into(), - protocol_version: 2, best_hash: 5u32, best_number: 6u32, }).unwrap(), - r#"{"peerId":"2","roles":"a","protocolVersion":2,"bestHash":5,"bestNumber":6}"#, + r#"{"peerId":"2","roles":"a","bestHash":5,"bestNumber":6}"#, ); } } diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index f6a1b470249efad6c071023eaa79153925cafb33..4fdf0298a530b7a996ce980ca9466b78c45f7c8c 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml @@ -1,25 +1,28 @@ [package] name = "sc-rpc-server" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate RPC servers." 
+readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpc-core = "14.2.0" -pubsub = { package = "jsonrpc-pubsub", version = "14.2.0" } +futures = "0.1.6" +jsonrpc-core = "15.0.0" +pubsub = { package = "jsonrpc-pubsub", version = "15.0.0" } log = "0.4.8" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} serde = "1.0.101" serde_json = "1.0.41" -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } [target.'cfg(not(target_os = "unknown"))'.dependencies] -http = { package = "jsonrpc-http-server", version = "14.2.0" } -ws = { package = "jsonrpc-ws-server", version = "14.2.0" } -ipc = { version = "14.2.0", package = "jsonrpc-ipc-server" } +http = { package = "jsonrpc-http-server", version = "15.0.0" } +ipc = { package = "jsonrpc-ipc-server", version = "15.0.0" } +ws = { package = "jsonrpc-ws-server", version = "15.0.0" } diff --git a/client/rpc-servers/README.md b/client/rpc-servers/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cf00b3169a6276b048b5a132d02df7c77e21dbf0 --- /dev/null +++ b/client/rpc-servers/README.md @@ -0,0 +1,3 @@ +Substrate RPC servers. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index 1e476262acea8761e8a0e266189934b02ad0f20d..1f99e8bb0d242496c30ec5dd6bfd036dce709d0a 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -20,8 +20,10 @@ #![warn(missing_docs)] +mod middleware; + use std::io; -use jsonrpc_core::IoHandlerExtension; +use jsonrpc_core::{IoHandlerExtension, MetaIoHandler}; use log::error; use pubsub::PubSubMetadata; @@ -32,15 +34,18 @@ const MAX_PAYLOAD: usize = 15 * 1024 * 1024; const WS_MAX_CONNECTIONS: usize = 100; /// The RPC IoHandler containing all requested APIs. -pub type RpcHandler = pubsub::PubSubHandler; +pub type RpcHandler = pubsub::PubSubHandler; pub use self::inner::*; +pub use middleware::{RpcMiddleware, RpcMetrics}; /// Construct rpc `IoHandler` pub fn rpc_handler( - extension: impl IoHandlerExtension + extension: impl IoHandlerExtension, + rpc_middleware: RpcMiddleware, ) -> RpcHandler { - let mut io = pubsub::PubSubHandler::default(); + let io_handler = MetaIoHandler::with_middleware(rpc_middleware); + let mut io = pubsub::PubSubHandler::new(io_handler); extension.augment(&mut io); // add an endpoint to list all available methods. diff --git a/client/rpc-servers/src/middleware.rs b/client/rpc-servers/src/middleware.rs new file mode 100644 index 0000000000000000000000000000000000000000..74139714c8cb7eded05cdc7644e0f1a26f35e46b --- /dev/null +++ b/client/rpc-servers/src/middleware.rs @@ -0,0 +1,87 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Middleware for RPC requests. + +use jsonrpc_core::{ + Middleware as RequestMiddleware, Metadata, + Request, Response, FutureResponse, FutureOutput +}; +use prometheus_endpoint::{ + Registry, CounterVec, PrometheusError, + Opts, register, U64 +}; + +use futures::{future::Either, Future}; + +/// Metrics for RPC middleware +#[derive(Debug, Clone)] +pub struct RpcMetrics { + rpc_calls: CounterVec, +} + +impl RpcMetrics { + /// Create an instance of metrics + pub fn new(metrics_registry: Option<&Registry>) -> Result { + metrics_registry.and_then(|r| { + Some(RpcMetrics { + rpc_calls: register(CounterVec::new( + Opts::new( + "rpc_calls_total", + "Number of rpc calls received", + ), + &["protocol"] + ).ok()?, r).ok()?, + }) + }).ok_or(PrometheusError::Msg("Cannot register metric".to_string())) + } +} + +/// Middleware for RPC calls +pub struct RpcMiddleware { + metrics: Option, + transport_label: String, +} + +impl RpcMiddleware { + /// Create an instance of middleware with provided metrics + /// transport_label is used as a label for Prometheus collector + pub fn new(metrics: Option, transport_label: &str) -> Self { + RpcMiddleware { + metrics, + transport_label: String::from(transport_label), + } + } +} + +impl RequestMiddleware for RpcMiddleware { + type Future = FutureResponse; + type CallFuture = FutureOutput; + + fn on_request(&self, request: Request, meta: M, next: F) -> Either + where + F: Fn(Request, M) -> X + Send + Sync, + X: Future, Error = ()> + Send + 'static, + { + if let Some(ref metrics) = self.metrics { + metrics.rpc_calls.with_label_values(&[self.transport_label.as_str()]).inc(); + } + + Either::B(next(request, meta)) + } +} diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 5681672cc3457ae01df702a5fb81fd82a7072c0f..12020bd32605e6363811016e3ce036a16dca55ef 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -1,40 +1,41 @@ [package] name = "sc-rpc" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate Client RPC" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-rpc-api = { version = "0.8.0-rc5", path = "../rpc-api" } -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } +sc-rpc-api = { version = "0.8.0", path = "../rpc-api" } +sc-client-api = { version = "2.0.0", path = "../api" } +sp-api = { version = "2.0.0", path = "../../primitives/api" } codec = { package = "parity-scale-codec", version = "1.3.4" } futures = { version = "0.3.1", features = ["compat"] } -jsonrpc-pubsub = "14.2.0" +jsonrpc-pubsub = "15.0.0" log = "0.4.8" -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -rpc = { package = "jsonrpc-core", version = "14.2.0" } -sp-version = { version = "2.0.0-rc5", path = "../../primitives/version" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +rpc = { package = "jsonrpc-core", version = "15.0.0" } +sp-version = { version = "2.0.0", path = "../../primitives/version" } serde_json = "1.0.41" -sp-session = { version = "2.0.0-rc5", path = "../../primitives/session" } -sp-offchain = { version = "2.0.0-rc5", path = "../../primitives/offchain" } -sp-runtime = 
{ version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils" } -sp-rpc = { version = "2.0.0-rc5", path = "../../primitives/rpc" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } -sp-chain-spec = { version = "2.0.0-rc5", path = "../../primitives/chain-spec" } -sc-executor = { version = "0.8.0-rc5", path = "../executor" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../client/block-builder" } -sc-keystore = { version = "2.0.0-rc5", path = "../keystore" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transaction-pool" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } +sp-session = { version = "2.0.0", path = "../../primitives/session" } +sp-offchain = { version = "2.0.0", path = "../../primitives/offchain" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0", path = "../../primitives/utils" } +sp-rpc = { version = "2.0.0", path = "../../primitives/rpc" } +sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } +sp-chain-spec = { version = "2.0.0", path = "../../primitives/chain-spec" } +sc-executor = { version = "0.8.0", path = "../executor" } +sc-block-builder = { version = "0.8.0", path = "../../client/block-builder" } +sc-keystore = { version = "2.0.0", path = "../keystore" } +sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } +sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } hash-db = { version = "0.15.2", default-features = false } parking_lot = "0.10.0" lazy_static = { version = "1.4.0", optional = true } @@ -42,11 +43,11 @@ lazy_static = { version = "1.4.0", optional = true } [dev-dependencies] assert_matches = "1.3.0" futures01 = { package = "futures", version = "0.1.29" } -sc-network = { version = "0.8.0-rc5", path = "../network" } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } +sc-network = { version = "0.8.0", path = "../network" } +sp-io = { version = "2.0.0", path = "../../primitives/io" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } tokio = "0.1.22" -sc-transaction-pool = { version = "2.0.0-rc5", path = "../transaction-pool" } +sc-transaction-pool = { version = "2.0.0", path = "../transaction-pool" } [features] test-helpers = ["lazy_static"] diff --git a/client/rpc/README.md b/client/rpc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6066af4da71a36ed2748f1cce2c360592e2b51ab --- /dev/null +++ b/client/rpc/README.md @@ -0,0 +1,5 @@ +Substrate RPC implementation. + +A core implementation of Substrate RPC interfaces. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index dfe1fcc415159719ad59ea6a2798be7aa6efcaca..f16d7da5b1a8b6f53600e0d8d51496e8e567027b 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -73,7 +73,6 @@ fn api>>(sync: T) -> System { peers.push(PeerInfo { peer_id: status.peer_id.to_base58(), roles: format!("{}", Role::Full), - protocol_version: 1, best_hash: Default::default(), best_number: 1, }); @@ -87,8 +86,6 @@ fn api>>(sync: T) -> System { external_addresses: Default::default(), connected_peers: Default::default(), not_connected_peers: Default::default(), - total_bytes_inbound: 0, - total_bytes_outbound: 0, peerset: serde_json::Value::Null, }).unwrap()); }, @@ -261,7 +258,6 @@ fn system_peers() { vec![PeerInfo { peer_id: peer_id.to_base58(), roles: "FULL".into(), - protocol_version: 1, best_hash: Default::default(), best_number: 1u64, }] @@ -282,8 +278,6 @@ fn system_network_state() { external_addresses: Default::default(), connected_peers: Default::default(), not_connected_peers: Default::default(), - total_bytes_inbound: 0, - total_bytes_outbound: 0, peerset: serde_json::Value::Null, } ); diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 6462549403b6f14093d8cb2aea14651a7684ef69..f57a145422c8c08e893fa140ca701ea8951520dc 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "sc-service" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate service. Starts a thread that spins up the network, client, and extrinsic pool. Manages communication between them." 
+readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -26,8 +27,8 @@ test-helpers = [] derive_more = "0.99.2" futures01 = { package = "futures", version = "0.1.29" } futures = { version = "0.3.4", features = ["compat"] } -jsonrpc-pubsub = "14.2" -jsonrpc-core = "14.2" +jsonrpc-pubsub = "15.0" +jsonrpc-core = "15.0" rand = "0.7.3" parking_lot = "0.10.0" lazy_static = "1.4.0" @@ -40,40 +41,41 @@ pin-project = "0.4.8" hash-db = "0.15.2" serde = "1.0.101" serde_json = "1.0.41" -sc-keystore = { version = "2.0.0-rc5", path = "../keystore" } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-trie = { version = "2.0.0-rc5", path = "../../primitives/trie" } -sp-externalities = { version = "0.8.0-rc5", path = "../../primitives/externalities" } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils" } -sp-version = { version = "2.0.0-rc5", path = "../../primitives/version" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-session = { version = "2.0.0-rc5", path = "../../primitives/session" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } -sp-application-crypto = { version = "2.0.0-rc5", path = "../../primitives/application-crypto" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0-rc5", path = "../../primitives/inherents" } -sc-network = { version = "0.8.0-rc5", path = "../network" } -sc-chain-spec = { version = "2.0.0-rc5", path = "../chain-spec" } -sc-light = { version = "2.0.0-rc5", path = "../light" } -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } -sc-client-db = { version = "0.8.0-rc5", default-features = false, path = "../db" } +sc-keystore = { version = "2.0.0", path = "../keystore" } +sp-io = { version = "2.0.0", path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-trie = { version = "2.0.0", path = "../../primitives/trie" } +sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } +sp-utils = { version = "2.0.0", path = "../../primitives/utils" } +sp-version = { version = "2.0.0", path = "../../primitives/version" } +sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-session = { version = "2.0.0", path = "../../primitives/session" } +sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } +sp-application-crypto = { version = "2.0.0", path = "../../primitives/application-crypto" } +sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } +sc-network = { version = "0.8.0", path = "../network" } +sc-chain-spec = { version = "2.0.0", path = "../chain-spec" } +sc-light = { version = "2.0.0", path = "../light" } +sc-client-api = { version = "2.0.0", path = "../api" } +sp-api = { version = "2.0.0", path = "../../primitives/api" } +sc-client-db = { version = "0.8.0", default-features = false, path = "../db" } codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-executor = { version = "0.8.0-rc5", path = "../executor" } -sc-transaction-pool = { version = 
"2.0.0-rc5", path = "../transaction-pool" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transaction-pool" } -sc-rpc-server = { version = "2.0.0-rc5", path = "../rpc-servers" } -sc-rpc = { version = "2.0.0-rc5", path = "../rpc" } -sc-block-builder = { version = "0.8.0-rc5", path = "../block-builder" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../primitives/block-builder" } -sc-informant = { version = "0.8.0-rc2", path = "../informant" } -sc-telemetry = { version = "2.0.0-rc5", path = "../telemetry" } -sc-offchain = { version = "2.0.0-rc5", path = "../offchain" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc5"} -sc-tracing = { version = "2.0.0-rc5", path = "../tracing" } -tracing = "0.1.18" +sc-executor = { version = "0.8.0", path = "../executor" } +sc-transaction-pool = { version = "2.0.0", path = "../transaction-pool" } +sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } +sc-rpc-server = { version = "2.0.0", path = "../rpc-servers" } +sc-rpc = { version = "2.0.0", path = "../rpc" } +sc-block-builder = { version = "0.8.0", path = "../block-builder" } +sp-block-builder = { version = "2.0.0", path = "../../primitives/block-builder" } +sc-informant = { version = "0.8.0", path = "../informant" } +sc-telemetry = { version = "2.0.0", path = "../telemetry" } +sc-offchain = { version = "2.0.0", path = "../offchain" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} +sc-tracing = { version = "2.0.0", path = "../tracing" } +sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } +tracing = "0.1.19" parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } [target.'cfg(not(target_os = "unknown"))'.dependencies] @@ -81,9 +83,9 @@ tempfile = "3.1.0" directories = "2.0.2" [dev-dependencies] -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } -sp-consensus-babe = { version = "0.8.0-rc5", path = "../../primitives/consensus/babe" } -grandpa = { version = "0.8.0-rc5", package = "sc-finality-grandpa", path = "../finality-grandpa" } -grandpa-primitives = { version = "2.0.0-rc5", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +sp-consensus-babe = { version = "0.8.0", path = "../../primitives/consensus/babe" } +grandpa = { version = "0.8.0", package = "sc-finality-grandpa", path = "../finality-grandpa" } +grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } tokio = { version = "0.2", default-features = false } async-std = { version = "1.6", default-features = false } diff --git a/client/service/README.md b/client/service/README.md new file mode 100644 index 0000000000000000000000000000000000000000..26f940f16df02328ad03e728e2a033ce3013acc6 --- /dev/null +++ b/client/service/README.md @@ -0,0 +1,4 @@ +Substrate service. Starts a thread that spins up the network, client, and extrinsic pool. +Manages communication between them. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 8ad95511f77d317944ccac5ac27bee2e22c431c1..410198af26da36fa3084454cfc00847e1106a57f 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -17,10 +17,10 @@ // along with this program. If not, see . use crate::{ - NetworkStatus, NetworkState, error::Error, DEFAULT_PROTOCOL_ID, MallocSizeOfWasm, + error::Error, DEFAULT_PROTOCOL_ID, MallocSizeOfWasm, TelemetryConnectionSinks, RpcHandlers, NetworkStatusSinks, start_rpc_servers, build_network_future, TransactionPoolAdapter, TaskManager, SpawnTaskHandle, - status_sinks, metrics::MetricsService, + metrics::MetricsService, client::{light, Client, ClientConfig}, config::{Configuration, KeystoreConfig, PrometheusConfig}, }; @@ -36,7 +36,7 @@ use sp_consensus::{ use futures::{FutureExt, StreamExt, future::ready, channel::oneshot}; use jsonrpc_pubsub::manager::SubscriptionManager; use sc_keystore::Store as Keystore; -use log::{info, warn, error}; +use log::{info, warn}; use sc_network::config::{Role, FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder}; use sc_network::NetworkService; use parking_lot::RwLock; @@ -46,7 +46,7 @@ use sp_runtime::traits::{ }; use sp_api::{ProvideRuntimeApi, CallApiAt}; use sc_executor::{NativeExecutor, NativeExecutionDispatch, RuntimeInfo}; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; use wasm_timer::SystemTime; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_transaction_pool::MaintainedTransactionPool; @@ -73,17 +73,25 @@ pub trait RpcExtensionBuilder { /// Returns an instance of the RPC extension for a particular `DenyUnsafe` /// value, e.g. the RPC extension might not expose some unsafe methods. - fn build(&self, deny: sc_rpc::DenyUnsafe, subscriptions: SubscriptionManager) -> Self::Output; + fn build( + &self, + deny: sc_rpc::DenyUnsafe, + subscription_executor: sc_rpc::SubscriptionTaskExecutor, + ) -> Self::Output; } impl RpcExtensionBuilder for F where - F: Fn(sc_rpc::DenyUnsafe, SubscriptionManager) -> R, + F: Fn(sc_rpc::DenyUnsafe, sc_rpc::SubscriptionTaskExecutor) -> R, R: sc_rpc::RpcExtension, { type Output = R; - fn build(&self, deny: sc_rpc::DenyUnsafe, subscriptions: SubscriptionManager) -> Self::Output { - (*self)(deny, subscriptions) + fn build( + &self, + deny: sc_rpc::DenyUnsafe, + subscription_executor: sc_rpc::SubscriptionTaskExecutor, + ) -> Self::Output { + (*self)(deny, subscription_executor) } } @@ -97,7 +105,11 @@ impl RpcExtensionBuilder for NoopRpcExtensionBuilder where { type Output = R; - fn build(&self, _deny: sc_rpc::DenyUnsafe, _subscriptions: SubscriptionManager) -> Self::Output { + fn build( + &self, + _deny: sc_rpc::DenyUnsafe, + _subscription_executor: sc_rpc::SubscriptionTaskExecutor, + ) -> Self::Output { self.0.clone() } } @@ -431,7 +443,7 @@ pub fn build_offchain_workers( client.clone(), offchain, Clone::clone(&spawn_handle), - network.clone() + network.clone(), ) ); } @@ -472,7 +484,9 @@ pub fn spawn_tasks( transaction_pool, rpc_extensions_builder, remote_blockchain, - network, network_status_sinks, system_rpc_tx, + network, + network_status_sinks, + system_rpc_tx, telemetry_connection_sinks, } = params; @@ -521,26 +535,32 @@ pub fn spawn_tasks( MetricsService::new() }; - // Periodically notify the telemetry. 
- spawn_handle.spawn("telemetry-periodic-send", telemetry_periodic_send( - client.clone(), transaction_pool.clone(), metrics_service, network_status_sinks.clone() - )); - - // Periodically send the network state to the telemetry. - spawn_handle.spawn( - "telemetry-periodic-network-state", - telemetry_periodic_network_state(network_status_sinks.clone()), + // Periodically updated metrics and telemetry updates. + spawn_handle.spawn("telemetry-periodic-send", + metrics_service.run( + client.clone(), + transaction_pool.clone(), + network_status_sinks.clone() + ) ); // RPC - let gen_handler = |deny_unsafe: sc_rpc::DenyUnsafe| gen_handler( - deny_unsafe, &config, task_manager.spawn_handle(), client.clone(), transaction_pool.clone(), - keystore.clone(), on_demand.clone(), remote_blockchain.clone(), &*rpc_extensions_builder, + let gen_handler = | + deny_unsafe: sc_rpc::DenyUnsafe, + rpc_middleware: sc_rpc_server::RpcMiddleware + | gen_handler( + deny_unsafe, rpc_middleware, &config, task_manager.spawn_handle(), + client.clone(), transaction_pool.clone(), keystore.clone(), + on_demand.clone(), remote_blockchain.clone(), &*rpc_extensions_builder, backend.offchain_storage(), system_rpc_tx.clone() ); - let rpc = start_rpc_servers(&config, gen_handler)?; + let rpc_metrics = sc_rpc_server::RpcMetrics::new(config.prometheus_registry()).ok(); + let rpc = start_rpc_servers(&config, gen_handler, rpc_metrics.as_ref())?; // This is used internally, so don't restrict access to unsafe RPC - let rpc_handlers = RpcHandlers(Arc::new(gen_handler(sc_rpc::DenyUnsafe::No).into())); + let rpc_handlers = RpcHandlers(Arc::new(gen_handler( + sc_rpc::DenyUnsafe::No, + sc_rpc_server::RpcMiddleware::new(rpc_metrics.as_ref().cloned(), "inbrowser") + ).into())); // Telemetry let telemetry = config.telemetry_endpoints.clone().and_then(|endpoints| { @@ -560,21 +580,10 @@ pub fn spawn_tasks( )) }); - // Instrumentation - if let Some(tracing_targets) = config.tracing_targets.as_ref() { - let subscriber = sc_tracing::ProfilingSubscriber::new( - config.tracing_receiver, tracing_targets - ); - match tracing::subscriber::set_global_default(subscriber) { - Ok(_) => (), - Err(e) => error!(target: "tracing", "Unable to set global default subscriber {}", e), - } - } - // Spawn informant task spawn_handle.spawn("informant", sc_informant::build( client.clone(), - network_status_sinks.clone().0, + network_status_sinks.status.clone(), transaction_pool.clone(), config.informant_output_format, )); @@ -606,47 +615,6 @@ async fn transaction_notifications( .await; } -// Periodically notify the telemetry. -async fn telemetry_periodic_send( - client: Arc, - transaction_pool: Arc, - mut metrics_service: MetricsService, - network_status_sinks: NetworkStatusSinks, -) - where - TBl: BlockT, - TCl: ProvideRuntimeApi + UsageProvider, - TExPool: MaintainedTransactionPool::Hash>, -{ - let (state_tx, state_rx) = tracing_unbounded::<(NetworkStatus<_>, NetworkState)>("mpsc_netstat1"); - network_status_sinks.0.push(std::time::Duration::from_millis(5000), state_tx); - state_rx.for_each(move |(net_status, _)| { - let info = client.usage_info(); - metrics_service.tick( - &info, - &transaction_pool.status(), - &net_status, - ); - ready(()) - }).await; -} - -async fn telemetry_periodic_network_state( - network_status_sinks: NetworkStatusSinks, -) { - // Periodically send the network state to the telemetry. 
- let (netstat_tx, netstat_rx) = tracing_unbounded::<(NetworkStatus<_>, NetworkState)>("mpsc_netstat2"); - network_status_sinks.0.push(std::time::Duration::from_secs(30), netstat_tx); - netstat_rx.for_each(move |(_, network_state)| { - telemetry!( - SUBSTRATE_INFO; - "system.network_state"; - "state" => network_state, - ); - ready(()) - }).await; -} - fn build_telemetry( config: &mut Configuration, endpoints: sc_telemetry::TelemetryEndpoints, @@ -700,6 +668,7 @@ fn build_telemetry( fn gen_handler( deny_unsafe: sc_rpc::DenyUnsafe, + rpc_middleware: sc_rpc_server::RpcMiddleware, config: &Configuration, spawn_handle: SpawnTaskHandle, client: Arc, @@ -710,7 +679,7 @@ fn gen_handler( rpc_extensions_builder: &(dyn RpcExtensionBuilder + Send), offchain_storage: Option<>::OffchainStorage>, system_rpc_tx: TracingUnboundedSender> -) -> jsonrpc_pubsub::PubSubHandler +) -> sc_rpc_server::RpcHandler where TBl: BlockT, TCl: ProvideRuntimeApi + BlockchainEvents + HeaderBackend + @@ -735,7 +704,7 @@ fn gen_handler( }; let task_executor = sc_rpc::SubscriptionTaskExecutor::new(spawn_handle); - let subscriptions = SubscriptionManager::new(Arc::new(task_executor)); + let subscriptions = SubscriptionManager::new(Arc::new(task_executor.clone())); let (chain, state, child_state) = if let (Some(remote_blockchain), Some(on_demand)) = (remote_blockchain, on_demand) { @@ -764,30 +733,29 @@ fn gen_handler( let author = sc_rpc::author::Author::new( client, transaction_pool, - subscriptions.clone(), + subscriptions, keystore, deny_unsafe, ); let system = system::System::new(system_info, system_rpc_tx, deny_unsafe); - let maybe_offchain_rpc = offchain_storage - .map(|storage| { + let maybe_offchain_rpc = offchain_storage.map(|storage| { let offchain = sc_rpc::offchain::Offchain::new(storage, deny_unsafe); - // FIXME: Use plain Option (don't collect into HashMap) when we upgrade to jsonrpc 14.1 - // https://github.com/paritytech/jsonrpc/commit/20485387ed06a48f1a70bf4d609a7cde6cf0accf - let delegate = offchain::OffchainApi::to_delegate(offchain); - delegate.into_iter().collect::>() - }).unwrap_or_default(); - - sc_rpc_server::rpc_handler(( - state::StateApi::to_delegate(state), - state::ChildStateApi::to_delegate(child_state), - chain::ChainApi::to_delegate(chain), - maybe_offchain_rpc, - author::AuthorApi::to_delegate(author), - system::SystemApi::to_delegate(system), - rpc_extensions_builder.build(deny_unsafe, subscriptions), - )) + offchain::OffchainApi::to_delegate(offchain) + }); + + sc_rpc_server::rpc_handler( + ( + state::StateApi::to_delegate(state), + state::ChildStateApi::to_delegate(child_state), + chain::ChainApi::to_delegate(chain), + maybe_offchain_rpc, + author::AuthorApi::to_delegate(author), + system::SystemApi::to_delegate(system), + rpc_extensions_builder.build(deny_unsafe, task_executor), + ), + rpc_middleware + ) } /// Parameters to pass into `build_network`. 
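For orientation, a minimal sketch of what a node-side `rpc_extensions_builder` closure looks like after this signature change — the closure now receives a `SubscriptionTaskExecutor` instead of a `SubscriptionManager`. This is an editorial sketch, not code from this PR; the function name and the placeholder body are assumptions.

```rust
// Hedged sketch only: a closure satisfying the updated `RpcExtensionBuilder`
// blanket impl. Real nodes would register their RPC APIs on `io` (e.g. via
// `extend_with`), spawning subscription background tasks on
// `subscription_executor` and gating unsafe methods on `deny_unsafe`.
use sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor};

fn rpc_extensions_builder() -> Box<
    dyn Fn(DenyUnsafe, SubscriptionTaskExecutor) -> jsonrpc_core::IoHandler<sc_rpc::Metadata> + Send
> {
    Box::new(move |deny_unsafe, subscription_executor| {
        // Placeholder: an empty handler with no APIs registered yet.
        let io = jsonrpc_core::IoHandler::default();
        let _ = (&deny_unsafe, &subscription_executor);
        io
    })
}
```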
@@ -854,7 +822,7 @@ pub fn build_network( ); DEFAULT_PROTOCOL_ID } - }.as_bytes(); + }; sc_network::config::ProtocolId::from(protocol_id_full) }; @@ -887,7 +855,7 @@ pub fn build_network( let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); let network_mut = sc_network::NetworkWorker::new(network_params)?; let network = network_mut.service().clone(); - let network_status_sinks = NetworkStatusSinks::new(Arc::new(status_sinks::StatusSinks::new())); + let network_status_sinks = NetworkStatusSinks::new(); let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); diff --git a/client/service/src/chain_ops/build_sync_spec.rs b/client/service/src/chain_ops/build_sync_spec.rs new file mode 100644 index 0000000000000000000000000000000000000000..9553ea21a696528e4bf7911357030b352f03a4a7 --- /dev/null +++ b/client/service/src/chain_ops/build_sync_spec.rs @@ -0,0 +1,36 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use sp_runtime::traits::Block as BlockT; +use sp_blockchain::HeaderBackend; +use std::sync::Arc; +use sp_runtime::generic::BlockId; + +/// Build a `LightSyncState` from the CHT roots stored in a backend. 
+pub fn build_light_sync_state( + client: Arc, +) -> Result, sp_blockchain::Error> + where + TBl: BlockT, + TCl: HeaderBackend, +{ + let finalized_hash = client.info().finalized_hash; + let header = client.header(BlockId::Hash(finalized_hash))?.unwrap(); + + Ok(sc_chain_spec::LightSyncState { + header + }) +} diff --git a/client/service/src/chain_ops/mod.rs b/client/service/src/chain_ops/mod.rs index af6e6f632fc06438f8396c158425d57d30727ecd..e6b2fdfb8e0e684606a7201fc0dab998ec0a2525 100644 --- a/client/service/src/chain_ops/mod.rs +++ b/client/service/src/chain_ops/mod.rs @@ -21,9 +21,11 @@ mod export_blocks; mod export_raw_state; mod import_blocks; mod revert_chain; +mod build_sync_spec; pub use check_block::*; pub use export_blocks::*; pub use export_raw_state::*; pub use import_blocks::*; pub use revert_chain::*; +pub use build_sync_spec::*; diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index d0859f4ee0392d2efb8c161c79f5a69aba8607b5..fd76084988dbc5f55f4a01d4f08bb64d1df91b31 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -802,7 +802,8 @@ impl Client where operation.op.insert_aux(aux)?; - if make_notifications { + // we only notify when we are already synced to the tip of the chain or if this import triggers a re-org + if make_notifications || tree_route.is_some() { if finalized { operation.notify_finalized.push(hash); } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index d19b9f5ea247d7467ba549a5de8f79287475e611..39f1dff289a1a265c770c5f38888aee9e24970eb 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -97,7 +97,7 @@ impl MallocSizeOfWasm for T {} /// RPC handlers that can perform RPC queries. #[derive(Clone)] -pub struct RpcHandlers(Arc>); +pub struct RpcHandlers(Arc>); impl RpcHandlers { /// Starts an RPC query. @@ -118,7 +118,8 @@ impl RpcHandlers { } /// Provides access to the underlying `MetaIoHandler` - pub fn io_handler(&self) -> Arc> { + pub fn io_handler(&self) + -> Arc> { self.0.clone() } } @@ -126,24 +127,37 @@ impl RpcHandlers { /// Sinks to propagate network status updates. /// For each element, every time the `Interval` fires we push an element on the sender. #[derive(Clone)] -pub struct NetworkStatusSinks( - Arc, NetworkState)>>, -); +pub struct NetworkStatusSinks { + status: Arc>>, + state: Arc>, +} impl NetworkStatusSinks { - fn new( - sinks: Arc, NetworkState)>> - ) -> Self { - Self(sinks) + fn new() -> Self { + Self { + status: Arc::new(status_sinks::StatusSinks::new()), + state: Arc::new(status_sinks::StatusSinks::new()), + } } - /// Returns a receiver that periodically receives a status of the network. - pub fn network_status(&self, interval: Duration) - -> TracingUnboundedReceiver<(NetworkStatus, NetworkState)> { + /// Returns a receiver that periodically yields a [`NetworkStatus`]. + pub fn status_stream(&self, interval: Duration) + -> TracingUnboundedReceiver> + { let (sink, stream) = tracing_unbounded("mpsc_network_status"); - self.0.push(interval, sink); + self.status.push(interval, sink); + stream + } + + /// Returns a receiver that periodically yields a [`NetworkState`]. + pub fn state_stream(&self, interval: Duration) + -> TracingUnboundedReceiver + { + let (sink, stream) = tracing_unbounded("mpsc_network_state"); + self.state.push(interval, sink); stream } + } /// Sinks to propagate telemetry connection established events. 
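To illustrate the split introduced here, a minimal consumer of the new `NetworkStatusSinks` streams might look like the sketch below; the surrounding service wiring and the function name are assumptions, not part of this PR. The 5-second / 30-second intervals mirror the cadence the removed telemetry tasks used.

```rust
// Hedged sketch: polling the high-level status stream exposed by
// `NetworkStatusSinks::status_stream`. The detailed `state_stream` can be
// consumed the same way, typically on a longer interval.
use std::time::Duration;
use futures::StreamExt;
use sp_runtime::traits::Block as BlockT;

async fn watch_network<B: BlockT>(sinks: sc_service::NetworkStatusSinks<B>) {
    let mut status = sinks.status_stream(Duration::from_secs(5));
    while let Some(s) = status.next().await {
        // `NetworkStatus` carries the peer/sync counters previously bundled
        // with the full `NetworkState`.
        log::info!("connected peers: {}", s.num_connected_peers);
    }
}
```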
@@ -272,7 +286,6 @@ async fn build_network_future< sc_rpc::system::PeerInfo { peer_id: peer_id.to_base58(), roles: format!("{:?}", p.roles), - protocol_version: p.protocol_version, best_hash: p.best_hash, best_number: p.best_number, } @@ -319,20 +332,16 @@ async fn build_network_future< // the network. _ = (&mut network).fuse() => {} - // At a regular interval, we send the state of the network on what is called - // the "status sinks". - ready_sink = status_sinks.0.next().fuse() => { - let status = NetworkStatus { - sync_state: network.sync_state(), - best_seen_block: network.best_seen_block(), - num_sync_peers: network.num_sync_peers(), - num_connected_peers: network.num_connected_peers(), - num_active_peers: network.num_active_peers(), - total_bytes_inbound: network.total_bytes_inbound(), - total_bytes_outbound: network.total_bytes_outbound(), - }; - let state = network.network_state(); - ready_sink.send((status, state)); + // At a regular interval, we send high-level status as well as + // detailed state information of the network on what are called + // "status sinks". + + status_sink = status_sinks.status.next().fuse() => { + status_sink.send(network.status()); + } + + state_sink = status_sinks.state.next().fuse() => { + state_sink.send(network.network_state()); } } } @@ -374,9 +383,13 @@ mod waiting { /// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them alive. #[cfg(not(target_os = "unknown"))] -fn start_rpc_servers sc_rpc_server::RpcHandler>( +fn start_rpc_servers< + H: FnMut(sc_rpc::DenyUnsafe, sc_rpc_server::RpcMiddleware) + -> sc_rpc_server::RpcHandler +>( config: &Configuration, - mut gen_handler: H + mut gen_handler: H, + rpc_metrics: Option<&sc_rpc_server::RpcMetrics> ) -> Result, error::Error> { fn maybe_start_server(address: Option, mut start: F) -> Result, io::Error> where F: FnMut(&SocketAddr) -> Result, @@ -406,13 +419,21 @@ fn start_rpc_servers sc_rpc_server::RpcHandler sc_rpc_server::RpcHandler sc_rpc_server::RpcHandler sc_rpc_server::RpcHandler>( +fn start_rpc_servers< + H: FnMut(sc_rpc::DenyUnsafe, sc_rpc_server::RpcMiddleware) + -> sc_rpc_server::RpcHandler +>( _: &Configuration, - _: H + _: H, + _: Option<&sc_rpc_server::RpcMetrics> ) -> Result, error::Error> { Ok(Box::new(())) } diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index 90a77667581bf0b952feec7ea431e08eec319802..0af393b53f5178cf6873aeb126954d0c668a2992 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -18,14 +18,19 @@ use std::{convert::TryFrom, time::SystemTime}; -use crate::{NetworkStatus, config::Configuration}; +use crate::{NetworkStatus, NetworkState, NetworkStatusSinks, config::Configuration}; +use futures_timer::Delay; use prometheus_endpoint::{register, Gauge, U64, Registry, PrometheusError, Opts, GaugeVec}; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; +use sp_api::ProvideRuntimeApi; use sp_runtime::traits::{NumberFor, Block, SaturatedConversion, UniqueSaturatedInto}; -use sp_transaction_pool::PoolStatus; +use sp_transaction_pool::{PoolStatus, MaintainedTransactionPool}; use sp_utils::metrics::register_globals; -use sc_client_api::ClientInfo; +use sp_utils::mpsc::TracingUnboundedReceiver; +use sc_client_api::{ClientInfo, UsageProvider}; use sc_network::config::Role; +use std::sync::Arc; +use std::time::Duration; use wasm_timer::Instant; struct PrometheusMetrics { @@ -99,6 +104,9 @@ impl PrometheusMetrics { } } +/// A `MetricsService` periodically sends general client and +/// network state to 
the telemetry as well as (optionally) +/// a Prometheus endpoint. pub struct MetricsService { metrics: Option, last_update: Instant, @@ -107,6 +115,8 @@ pub struct MetricsService { } impl MetricsService { + /// Creates a `MetricsService` that only sends information + /// to the telemetry. pub fn new() -> Self { MetricsService { metrics: None, @@ -116,6 +126,8 @@ impl MetricsService { } } + /// Creates a `MetricsService` that sends metrics + /// to prometheus alongside the telemetry. pub fn with_prometheus( registry: &Registry, config: &Configuration, @@ -141,60 +153,109 @@ impl MetricsService { }) } - pub fn tick( + /// Returns a never-ending `Future` that performs the + /// metric and telemetry updates with information from + /// the given sources. + pub async fn run( + mut self, + client: Arc, + transactions: Arc, + network: NetworkStatusSinks, + ) where + TBl: Block, + TCl: ProvideRuntimeApi + UsageProvider, + TExPool: MaintainedTransactionPool::Hash>, + { + let mut timer = Delay::new(Duration::from_secs(0)); + let timer_interval = Duration::from_secs(5); + + // Metric and telemetry update interval. + let net_status_interval = timer_interval; + let net_state_interval = Duration::from_secs(30); + + // Source of network information. + let mut net_status_rx = Some(network.status_stream(net_status_interval)); + let mut net_state_rx = Some(network.state_stream(net_state_interval)); + + loop { + // Wait for the next tick of the timer. + (&mut timer).await; + + // Try to get the latest network information. + let mut net_status = None; + let mut net_state = None; + if let Some(rx) = net_status_rx.as_mut() { + match Self::latest(rx) { + Ok(status) => { net_status = status; } + Err(()) => { net_status_rx = None; } + } + } + if let Some(rx) = net_state_rx.as_mut() { + match Self::latest(rx) { + Ok(state) => { net_state = state; } + Err(()) => { net_state_rx = None; } + } + } + + // Update / Send the metrics. + self.update( + &client.usage_info(), + &transactions.status(), + net_status, + net_state, + ); + + // Schedule next tick. + timer.reset(timer_interval); + } + } + + // Try to get the latest value from a receiver, dropping intermediate values. 
+ fn latest(rx: &mut TracingUnboundedReceiver) -> Result, ()> { + let mut value = None; + + while let Ok(next) = rx.try_next() { + match next { + Some(v) => { + value = Some(v) + } + None => { + log::error!("Receiver closed unexpectedly."); + return Err(()) + } + } + } + + Ok(value) + } + + fn update( &mut self, info: &ClientInfo, txpool_status: &PoolStatus, - net_status: &NetworkStatus, + net_status: Option>, + net_state: Option, ) { let now = Instant::now(); let elapsed = (now - self.last_update).as_secs(); + self.last_update = now; let best_number = info.chain.best_number.saturated_into::(); let best_hash = info.chain.best_hash; - let num_peers = net_status.num_connected_peers; let finalized_number: u64 = info.chain.finalized_number.saturated_into::(); - let total_bytes_inbound = net_status.total_bytes_inbound; - let total_bytes_outbound = net_status.total_bytes_outbound; - let best_seen_block = net_status - .best_seen_block - .map(|num: NumberFor| num.unique_saturated_into() as u64); - - let diff_bytes_inbound = total_bytes_inbound - self.last_total_bytes_inbound; - let diff_bytes_outbound = total_bytes_outbound - self.last_total_bytes_outbound; - let (avg_bytes_per_sec_inbound, avg_bytes_per_sec_outbound) = - if elapsed > 0 { - self.last_total_bytes_inbound = total_bytes_inbound; - self.last_total_bytes_outbound = total_bytes_outbound; - (diff_bytes_inbound / elapsed, diff_bytes_outbound / elapsed) - } else { - (diff_bytes_inbound, diff_bytes_outbound) - }; - self.last_update = now; + // Update/send metrics that are always available. telemetry!( SUBSTRATE_INFO; "system.interval"; - "peers" => num_peers, "height" => best_number, "best" => ?best_hash, "txcount" => txpool_status.ready, "finalized_height" => finalized_number, "finalized_hash" => ?info.chain.finalized_hash, - "bandwidth_download" => avg_bytes_per_sec_inbound, - "bandwidth_upload" => avg_bytes_per_sec_outbound, "used_state_cache_size" => info.usage.as_ref() .map(|usage| usage.memory.state_cache.as_bytes()) .unwrap_or(0), - "used_db_cache_size" => info.usage.as_ref() - .map(|usage| usage.memory.database_cache.as_bytes()) - .unwrap_or(0), - "disk_read_per_sec" => info.usage.as_ref() - .map(|usage| usage.io.bytes_read) - .unwrap_or(0), - "disk_write_per_sec" => info.usage.as_ref() - .map(|usage| usage.io.bytes_written) - .unwrap_or(0), ); if let Some(metrics) = self.metrics.as_ref() { @@ -213,10 +274,6 @@ impl MetricsService { metrics.ready_transactions_number.set(txpool_status.ready as u64); - if let Some(best_seen_block) = best_seen_block { - metrics.block_height.with_label_values(&["sync_target"]).set(best_seen_block); - } - if let Some(info) = info.usage.as_ref() { metrics.database_cache.set(info.memory.database_cache.as_bytes() as u64); metrics.state_cache.set(info.memory.state_cache.as_bytes() as u64); @@ -232,5 +289,50 @@ impl MetricsService { ); } } + + // Update/send network status information, if any. 
+ if let Some(net_status) = net_status { + let num_peers = net_status.num_connected_peers; + let total_bytes_inbound = net_status.total_bytes_inbound; + let total_bytes_outbound = net_status.total_bytes_outbound; + + let diff_bytes_inbound = total_bytes_inbound - self.last_total_bytes_inbound; + let diff_bytes_outbound = total_bytes_outbound - self.last_total_bytes_outbound; + let (avg_bytes_per_sec_inbound, avg_bytes_per_sec_outbound) = + if elapsed > 0 { + self.last_total_bytes_inbound = total_bytes_inbound; + self.last_total_bytes_outbound = total_bytes_outbound; + (diff_bytes_inbound / elapsed, diff_bytes_outbound / elapsed) + } else { + (diff_bytes_inbound, diff_bytes_outbound) + }; + + telemetry!( + SUBSTRATE_INFO; + "system.interval"; + "peers" => num_peers, + "bandwidth_download" => avg_bytes_per_sec_inbound, + "bandwidth_upload" => avg_bytes_per_sec_outbound, + ); + + if let Some(metrics) = self.metrics.as_ref() { + let best_seen_block = net_status + .best_seen_block + .map(|num: NumberFor| num.unique_saturated_into() as u64); + + if let Some(best_seen_block) = best_seen_block { + metrics.block_height.with_label_values(&["sync_target"]).set(best_seen_block); + } + } + } + + // Send network state information, if any. + if let Some(net_state) = net_state { + telemetry!( + SUBSTRATE_INFO; + "system.network_state"; + "state" => net_state, + ); + } } } diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index d8f069eadfd72c342a6fef2c4250c923cef5b747..fde79d19ede713f9aedef111715ca79544d1759d 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-service-test" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -12,33 +12,33 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -hex-literal = "0.2.1" +hex-literal = "0.3.1" tempfile = "3.1.0" tokio = "0.1.22" futures01 = { package = "futures", version = "0.1.29" } log = "0.4.8" -env_logger = "0.7.0" -fdlimit = "0.1.4" +fdlimit = "0.2.0" parking_lot = "0.10.0" -sc-light = { version = "2.0.0-rc5", path = "../../light" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-machine" } -sp-externalities = { version = "0.8.0-rc5", path = "../../../primitives/externalities" } -sp-trie = { version = "2.0.0-rc5", path = "../../../primitives/trie" } -sp-storage = { version = "2.0.0-rc5", path = "../../../primitives/storage" } -sc-client-db = { version = "0.8.0-rc5", default-features = false, path = "../../db" } +sc-light = { version = "2.0.0", path = "../../light" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-api = { version = "2.0.0", path = "../../../primitives/api" } +sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } +sp-externalities = { version = "0.8.0", path = "../../../primitives/externalities" } +sp-trie = { version = "2.0.0", path = "../../../primitives/trie" } +sp-storage = { version = "2.0.0", path = "../../../primitives/storage" } +sc-client-db = { version = "0.8.0", default-features = false, path = "../../db" } futures = { version = "0.3.1", features = ["compat"] } -sc-service = { version = "0.8.0-rc5", default-features = false, features = 
["test-helpers"], path = "../../service" } -sc-network = { version = "0.8.0-rc5", path = "../../network" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../primitives/transaction-pool" } -substrate-test-runtime = { version = "2.0.0-rc5", path = "../../../test-utils/runtime" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } -sc-client-api = { version = "2.0.0-rc5", path = "../../api" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../block-builder" } -sc-executor = { version = "0.8.0-rc5", path = "../../executor" } -sp-panic-handler = { version = "2.0.0-rc5", path = "../../../primitives/panic-handler" } +sc-service = { version = "0.8.0", default-features = false, features = ["test-helpers"], path = "../../service" } +sc-network = { version = "0.8.0", path = "../../network" } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } +substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } +sc-client-api = { version = "2.0.0", path = "../../api" } +sc-block-builder = { version = "0.8.0", path = "../../block-builder" } +sc-executor = { version = "0.8.0", path = "../../executor" } +sp-panic-handler = { version = "2.0.0", path = "../../../primitives/panic-handler" } parity-scale-codec = "1.3.4" +sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index ffc84ad47b8f376b7f807e04376b36618278b086..f38aef008e11c803e5e001a2d077e6b505329628 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -42,7 +42,7 @@ use sc_executor::{NativeExecutor, WasmExecutionMethod, RuntimeVersion, NativeVer use sp_core::{H256, NativeOrEncoded, testing::TaskExecutor}; use sc_client_api::{ blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider, - in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, + in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, ProvideChtRoots, AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, StorageProof, BlockImportOperation, RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, RemoteHeaderRequest, BlockBackend, @@ -164,6 +164,16 @@ impl Storage for DummyStorage { Err(ClientError::Backend("Test error".into())) } + fn cache(&self) -> Option>> { + None + } + + fn usage_info(&self) -> Option { + None + } +} + +impl ProvideChtRoots for DummyStorage { fn header_cht_root(&self, _cht_size: u64, _block: u64) -> ClientResult> { Err(ClientError::Backend("Test error".into())) } @@ -177,14 +187,6 @@ impl Storage for DummyStorage { ).into()) .map(Some) } - - fn cache(&self) -> Option>> { - None - } - - fn usage_info(&self) -> Option { - None - } } struct DummyCallExecutor; diff --git 
a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 8d073df272fd9ab574b8de37db68c856cc717df0..34b063a3e3484b89eca86db8bdca995b524a5002 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1206,7 +1206,7 @@ fn get_header_by_block_number_doesnt_panic() { #[test] fn state_reverted_on_reorg() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let mut client = substrate_test_runtime_client::new(); let current_balance = |client: &substrate_test_runtime_client::TestClient| @@ -1266,7 +1266,7 @@ fn state_reverted_on_reorg() { #[test] fn doesnt_import_blocks_that_revert_finality() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let tmp = tempfile::tempdir().unwrap(); // we need to run with archive pruning to avoid pruning non-canonical @@ -1467,7 +1467,7 @@ fn respects_block_rules() { #[test] fn returns_status_for_pruned_blocks() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); let tmp = tempfile::tempdir().unwrap(); // set to prune after 1 block @@ -1802,3 +1802,57 @@ fn cleans_up_closed_notification_sinks_on_block_import() { assert_eq!(client.finality_notification_sinks().lock().len(), 0); } +/// Test that ensures that we always send an import notification for re-orgs. +#[test] +fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifications() { + let mut client = TestClientBuilder::new().build(); + + let mut notification_stream = futures::executor::block_on_stream( + client.import_notification_stream() + ); + + let a1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::NetworkInitialSync, a1.clone()).unwrap(); + + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::NetworkInitialSync, a2.clone()).unwrap(); + + let mut b1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap(); + // needed to make sure B1 gets a different hash from A1 + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }).unwrap(); + let b1 = b1.build().unwrap().block; + client.import(BlockOrigin::NetworkInitialSync, b1.clone()).unwrap(); + + let b2 = client.new_block_at( + &BlockId::Hash(b1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + + // Should trigger a notification because we reorg + client.import_as_best(BlockOrigin::NetworkInitialSync, b2.clone()).unwrap(); + + // There should be one notification + let notification = notification_stream.next().unwrap(); + + // We should have a tree route of the re-org + let tree_route = notification.tree_route.unwrap(); + assert_eq!(tree_route.enacted()[0].hash, b1.hash()); +} \ No newline at end of file diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 0d589cee7e12d220319b939d77db03021e5196f7..cfe815f174fac7c39d280a00709725d26a50af43 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -289,7 +289,7 @@ impl TestNet where )>, base_port: u16 ) -> TestNet { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); fdlimit::raise_fd_limit(); let runtime = Runtime::new().expect("Error creating tokio runtime"); let mut net = TestNet { diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 
685f68f083510e13dea2610d2a18ba0bba8e9290..4d3e736d9539e75c41cee79e20fd190c66d4283e 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "sc-state-db" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "State database maintenance. Handles canonicalization and pruning in the database." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,11 +15,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] parking_lot = "0.10.0" log = "0.4.8" -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sc-client-api = { version = "2.0.0", path = "../api" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } parity-util-mem-derive = "0.1.0" - -[dev-dependencies] -env_logger = "0.7.0" diff --git a/client/state-db/README.md b/client/state-db/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a02b3929088fc8289a22d4a372101ca703cb6876 --- /dev/null +++ b/client/state-db/README.md @@ -0,0 +1,16 @@ +State database maintenance. Handles canonicalization and pruning in the database. The input to +this module is a `ChangeSet`, which is essentially a list of key-value pairs (trie nodes) that +were added or deleted during block execution. + +# Canonicalization. +The canonicalization window tracks a tree of blocks identified by header hash. The in-memory +overlay allows getting any node that was inserted in any of the blocks within the window. +The tree is journaled to the backing database and rebuilt on startup. +The canonicalization function selects one root from the top of the tree and discards all other roots and +their subtrees. + +# Pruning. +See `RefWindow` for pruning algorithm details. `StateDb` prunes on each canonicalization until pruning +constraints are satisfied.
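As a rough illustration of the per-block input described in this README (an editorial sketch only, not the crate's literal `ChangeSet` definition):

```rust
// Sketch of the change set handed to the state database for each block:
// the trie nodes (key-value pairs) inserted during block execution,
// plus the keys that were deleted.
pub struct BlockChanges<Key, Value> {
    pub inserted: Vec<(Key, Value)>,
    pub deleted: Vec<Key>,
}
```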
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 4c9cc05b07fece268482a5c339429e91ce2c09f7..be7c88f68ae79a81de256583e2113a1fa6986936 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-telemetry" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] description = "Telemetry utils" edition = "2018" @@ -8,6 +8,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-telemetry" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,7 +19,7 @@ parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" wasm-timer = "0.2.0" -libp2p = { version = "0.23.0", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } +libp2p = { version = "0.28.1", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } log = "0.4.8" pin-project = "0.4.6" rand = "0.7.2" diff --git a/client/telemetry/README.md b/client/telemetry/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8fdf9e500722dd8ffc7835c628baab063ed64f17 --- /dev/null +++ b/client/telemetry/README.md @@ -0,0 +1,45 @@ +Telemetry utilities. + +Calling `init_telemetry` registers a global `slog` logger using `slog_scope::set_global_logger`. +After that, calling `slog_scope::with_logger` will return a logger that sends information to +the telemetry endpoints. The `telemetry!` macro is a short-cut for calling +`slog_scope::with_logger` followed with `slog_log!`. + +Note that you are supposed to only ever use `telemetry!` and not `slog_scope::with_logger` at +the moment. Substrate may eventually be reworked to get proper `slog` support, including sending +information to the telemetry. + +The [`Telemetry`] struct implements `Stream` and must be polled regularly (or sent to a +background thread/task) in order for the telemetry to properly function. Dropping the object +will also deregister the global logger and replace it with a logger that discards messages. +The `Stream` generates [`TelemetryEvent`]s. + +> **Note**: Cloning the [`Telemetry`] and polling from multiple clones has an unspecified behaviour. + +# Example + +```rust +use futures::prelude::*; + +let telemetry = sc_telemetry::init_telemetry(sc_telemetry::TelemetryConfig { + endpoints: sc_telemetry::TelemetryEndpoints::new(vec![ + // The `0` is the maximum verbosity level of messages to send to this endpoint. + ("wss://example.com".into(), 0) + ]).expect("Invalid URL or multiaddr provided"), + // Can be used to pass an external implementation of WebSockets. + wasm_external_transport: None, +}); + +// The `telemetry` object implements `Stream` and must be processed. +std::thread::spawn(move || { + futures::executor::block_on(telemetry.for_each(|_| future::ready(()))); +}); + +// Sends a message on the telemetry. 
+sc_telemetry::telemetry!(sc_telemetry::SUBSTRATE_INFO; "test"; + "foo" => "bar", +) +``` + + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index 0a692cbe57f842f24a5e9ac34419383e449cdd2a..37b8cb47928241a94890ef6e7c36dcc6b8a641d0 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "sc-tracing" -version = "2.0.0-rc5" +version = "2.0.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Instrumentation implementation for substrate." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -19,8 +20,8 @@ rustc-hash = "1.1.0" serde = "1.0.101" serde_json = "1.0.41" slog = { version = "2.5.2", features = ["nested-values"] } -tracing = "0.1.18" +tracing = "0.1.19" +tracing-core = "0.1.13" tracing-subscriber = "0.2.10" -sp-tracing = { version = "2.0.0-rc2", path = "../../primitives/tracing" } - -sc-telemetry = { version = "2.0.0-rc5", path = "../telemetry" } +sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } +sc-telemetry = { version = "2.0.0", path = "../telemetry" } diff --git a/client/tracing/README.md b/client/tracing/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b008436df9bbedd136f79be15c10ec5a7c7b9a07 --- /dev/null +++ b/client/tracing/README.md @@ -0,0 +1,11 @@ +Instrumentation implementation for substrate. + +This crate is unstable and the API and usage may change. + +# Usage + +See `sp-tracing` for examples on how to use tracing. + +Currently we provide `Log` (default), `Telemetry` variants for `Receiver` + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/tracing/src/lib.rs b/client/tracing/src/lib.rs index f642b00720f1ad13db5b136b58e14e1c7c4fda41..6690f283464ea4674c91500f166e97d42b2025b8 100644 --- a/client/tracing/src/lib.rs +++ b/client/tracing/src/lib.rs @@ -26,7 +26,6 @@ use rustc_hash::FxHashMap; use std::fmt; -use std::sync::atomic::{AtomicU64, Ordering}; use std::time::{Duration, Instant}; use parking_lot::Mutex; @@ -35,21 +34,17 @@ use tracing::{ event::Event, field::{Visit, Field}, Level, - metadata::Metadata, span::{Attributes, Id, Record}, subscriber::Subscriber, }; -use tracing_subscriber::CurrentSpan; +use tracing_subscriber::{CurrentSpan, layer::{Layer, Context}}; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; -use sp_tracing::proxy::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; - +use sp_tracing::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; const ZERO_DURATION: Duration = Duration::from_nanos(0); -const PROXY_TARGET: &'static str = "sp_tracing::proxy"; /// Responsible for assigning ids to new spans, which are not re-used. -pub struct ProfilingSubscriber { - next_id: AtomicU64, +pub struct ProfilingLayer { targets: Vec<(String, Level)>, trace_handler: Box, span_data: Mutex>, @@ -216,12 +211,12 @@ impl slog::Value for Values { } } -impl ProfilingSubscriber { +impl ProfilingLayer { /// Takes a `TracingReceiver` and a comma separated list of targets, /// either with a level: "pallet=trace,frame=debug" /// or without: "pallet,frame" in which case the level defaults to `trace`. 
/// wasm_tracing indicates whether to enable wasm traces - pub fn new(receiver: TracingReceiver, targets: &str) -> ProfilingSubscriber { + pub fn new(receiver: TracingReceiver, targets: &str) -> Self { match receiver { TracingReceiver::Log => Self::new_with_handler(Box::new(LogTraceHandler), targets), TracingReceiver::Telemetry => Self::new_with_handler( @@ -237,11 +232,10 @@ impl ProfilingSubscriber { /// or without: "pallet" in which case the level defaults to `trace`. /// wasm_tracing indicates whether to enable wasm traces pub fn new_with_handler(trace_handler: Box, targets: &str) - -> ProfilingSubscriber + -> Self { let targets: Vec<_> = targets.split(',').map(|s| parse_target(s)).collect(); - ProfilingSubscriber { - next_id: AtomicU64::new(1), + Self { targets, trace_handler, span_data: Mutex::new(FxHashMap::default()), @@ -276,27 +270,10 @@ fn parse_target(s: &str) -> (String, Level) { } } -impl Subscriber for ProfilingSubscriber { - fn enabled(&self, metadata: &Metadata<'_>) -> bool { - if metadata.target() == PROXY_TARGET || self.check_target(metadata.target(), metadata.level()) { - log::debug!(target: "tracing", "Enabled target: {}, level: {}", metadata.target(), metadata.level()); - true - } else { - log::debug!(target: "tracing", "Disabled target: {}, level: {}", metadata.target(), metadata.level()); - false - } - } - - fn new_span(&self, attrs: &Attributes<'_>) -> Id { - let id = Id::from_u64(self.next_id.fetch_add(1, Ordering::Relaxed)); +impl Layer for ProfilingLayer { + fn new_span(&self, attrs: &Attributes<'_>, id: &Id, _ctx: Context) { let mut values = Values::default(); attrs.record(&mut values); - // If this is a wasm trace, check if target/level is enabled - if let Some(wasm_target) = values.string_values.get(WASM_TARGET_KEY) { - if !self.check_target(wasm_target, attrs.metadata().level()) { - return id - } - } let span_datum = SpanDatum { id: id.clone(), parent_id: attrs.parent().cloned().or_else(|| self.current_span.id()), @@ -309,19 +286,16 @@ impl Subscriber for ProfilingSubscriber { values, }; self.span_data.lock().insert(id.clone(), span_datum); - id } - fn record(&self, span: &Id, values: &Record<'_>) { + fn on_record(&self, span: &Id, values: &Record<'_>, _ctx: Context) { let mut span_data = self.span_data.lock(); if let Some(s) = span_data.get_mut(span) { values.record(&mut s.values); } } - fn record_follows_from(&self, _span: &Id, _follows: &Id) {} - - fn event(&self, event: &Event<'_>) { + fn on_event(&self, event: &Event<'_>, _ctx: Context) { let mut values = Values::default(); event.record(&mut values); let trace_event = TraceEvent { @@ -334,7 +308,7 @@ impl Subscriber for ProfilingSubscriber { self.trace_handler.handle_event(trace_event); } - fn enter(&self, span: &Id) { + fn on_enter(&self, span: &Id, _ctx: Context) { self.current_span.enter(span.clone()); let mut span_data = self.span_data.lock(); let start_time = Instant::now(); @@ -343,21 +317,16 @@ impl Subscriber for ProfilingSubscriber { } } - fn exit(&self, span: &Id) { + fn on_exit(&self, span: &Id, _ctx: Context) { self.current_span.exit(); let end_time = Instant::now(); - let mut span_data = self.span_data.lock(); - if let Some(mut s) = span_data.get_mut(&span) { - s.overall_time = end_time - s.start_time + s.overall_time; - } - } - - fn try_close(&self, span: Id) -> bool { let span_datum = { let mut span_data = self.span_data.lock(); span_data.remove(&span) }; + if let Some(mut span_datum) = span_datum { + span_datum.overall_time += end_time - span_datum.start_time; if span_datum.name == 
WASM_TRACE_IDENTIFIER { span_datum.values.bool_values.insert("wasm".to_owned(), true); if let Some(n) = span_datum.values.string_values.remove(WASM_NAME_KEY) { @@ -373,7 +342,10 @@ impl Subscriber for ProfilingSubscriber { self.trace_handler.handle_span(span_datum); } }; - true + } + + fn on_close(&self, span: Id, ctx: Context) { + self.on_exit(&span, ctx) } } @@ -458,6 +430,7 @@ impl TraceHandler for TelemetryTraceHandler { mod tests { use super::*; use std::sync::Arc; + use tracing_subscriber::layer::SubscriberExt; struct TestTraceHandler { spans: Arc>>, @@ -474,18 +447,24 @@ mod tests { } } - fn setup_subscriber() -> (ProfilingSubscriber, Arc>>, Arc>>) { + type TestSubscriber = tracing_subscriber::layer::Layered< + ProfilingLayer, + tracing_subscriber::fmt::Subscriber + >; + + fn setup_subscriber() -> (TestSubscriber, Arc>>, Arc>>) { let spans = Arc::new(Mutex::new(Vec::new())); let events = Arc::new(Mutex::new(Vec::new())); let handler = TestTraceHandler { spans: spans.clone(), events: events.clone(), }; - let test_subscriber = ProfilingSubscriber::new_with_handler( + let layer = ProfilingLayer::new_with_handler( Box::new(handler), "test_target" ); - (test_subscriber, spans, events) + let subscriber = tracing_subscriber::fmt().finish().with(layer); + (subscriber, spans, events) } #[test] diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 56ea881d7a016b745702abed5e02a0ec63c8f0c6..5db37f536838727fb5c2ac7ccab785e5ec6be365 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "sc-transaction-pool" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate transaction pool implementation." 
+readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -20,23 +21,23 @@ intervalier = "0.4.0" log = "0.4.8" parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } parking_lot = "0.10.0" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-rc5"} -sc-client-api = { version = "2.0.0-rc5", path = "../api" } -sc-transaction-graph = { version = "2.0.0-rc5", path = "./graph" } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0-rc5", path = "../../primitives/tracing" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../primitives/transaction-pool" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-utils = { version = "2.0.0-rc5", path = "../../primitives/utils" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} +sc-client-api = { version = "2.0.0", path = "../api" } +sc-transaction-graph = { version = "2.0.0", path = "./graph" } +sp-api = { version = "2.0.0", path = "../../primitives/api" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } +sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } +sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sp-utils = { version = "2.0.0", path = "../../primitives/utils" } wasm-timer = "0.2" [dev-dependencies] assert_matches = "1.3.0" hex = "0.4" -sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -substrate-test-runtime-transaction-pool = { version = "2.0.0-rc5", path = "../../test-utils/runtime/transaction-pool" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } -sc-block-builder = { version = "0.8.0-rc5", path = "../block-builder" } +sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } +sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } +substrate-test-runtime-transaction-pool = { version = "2.0.0", path = "../../test-utils/runtime/transaction-pool" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +sc-block-builder = { version = "0.8.0", path = "../block-builder" } diff --git a/client/transaction-pool/README.md b/client/transaction-pool/README.md new file mode 100644 index 0000000000000000000000000000000000000000..15e4641c1f48d9d2c245e2e373620411669efdba --- /dev/null +++ b/client/transaction-pool/README.md @@ -0,0 +1,3 @@ +Substrate transaction pool implementation. 
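As a usage illustration for this crate (a hedged sketch; the function name, variables, and error handling are assumptions, not part of this PR), an extrinsic is submitted through the `sp_transaction_pool::TransactionPool` trait that this implementation provides:

```rust
// Hedged sketch: submit a single extrinsic to the pool at the current best block.
use sp_runtime::{generic::BlockId, traits::Block as BlockT, transaction_validity::TransactionSource};
use sp_transaction_pool::TransactionPool;

async fn submit_to_pool<P: TransactionPool>(
    pool: &P,
    best_hash: <P::Block as BlockT>::Hash,
    xt: <P::Block as BlockT>::Extrinsic,
) {
    match pool.submit_one(&BlockId::Hash(best_hash), TransactionSource::Local, xt).await {
        Ok(hash) => log::info!("transaction accepted by the pool: {:?}", hash),
        Err(_) => log::warn!("transaction rejected by the pool"),
    }
}
```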
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/transaction-pool/graph/Cargo.toml b/client/transaction-pool/graph/Cargo.toml index 8719a9c8fed231f605c72ada93679a2d70001d9b..c5850e765fcfa88d59db43af4d37bc6d30c27158 100644 --- a/client/transaction-pool/graph/Cargo.toml +++ b/client/transaction-pool/graph/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "sc-transaction-graph" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Generic Transaction Pool" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,11 +19,11 @@ log = "0.4.8" parking_lot = "0.10.0" serde = { version = "1.0.101", features = ["derive"] } wasm-timer = "0.2" -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-utils = { version = "2.0.0-rc5", path = "../../../primitives/utils" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../primitives/transaction-pool" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-utils = { version = "2.0.0", path = "../../../primitives/utils" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } linked-hash-map = "0.5.2" retain_mut = "0.1.1" @@ -30,7 +31,7 @@ retain_mut = "0.1.1" [dev-dependencies] assert_matches = "1.3.0" codec = { package = "parity-scale-codec", version = "1.3.4" } -substrate-test-runtime = { version = "2.0.0-rc5", path = "../../../test-utils/runtime" } +substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } criterion = "0.3" [[bench]] diff --git a/client/transaction-pool/graph/README.md b/client/transaction-pool/graph/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bc9cd929122f16d88af886f13ac8aa9c37bc460c --- /dev/null +++ b/client/transaction-pool/graph/README.md @@ -0,0 +1,8 @@ +Generic Transaction Pool + +The pool is based on dependency graph between transactions +and their priority. +The pool is able to return an iterator that traverses transaction +graph in the correct order taking into account priorities and dependencies. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index c6671fd5bd7f0cfbe13a99b99104de731a61cd55..853b66f6e74bbd973aa7360a96e0aabe8d8c1c78 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -168,23 +168,28 @@ where Client::Api: TaggedTransactionQueue, sp_api::ApiErrorFor: Send + std::fmt::Display, { - sp_tracing::enter_span!("validate_transaction"); - let runtime_api = client.runtime_api(); - let has_v2 = sp_tracing::tracing_span! 
{ "check_version"; - runtime_api - .has_api_with::, _>(&at, |v| v >= 2) - .unwrap_or_default() - }; - - sp_tracing::enter_span!("runtime::validate_transaction"); - let res = if has_v2 { - runtime_api.validate_transaction(&at, source, uxt) - } else { - #[allow(deprecated)] // old validate_transaction - runtime_api.validate_transaction_before_version_2(&at, uxt) - }; - - res.map_err(|e| Error::RuntimeApi(e.to_string())) + sp_tracing::within_span!(sp_tracing::Level::TRACE, "validate_transaction"; + { + let runtime_api = client.runtime_api(); + let has_v2 = sp_tracing::within_span! { sp_tracing::Level::TRACE, "check_version"; + runtime_api + .has_api_with::, _>(&at, |v| v >= 2) + .unwrap_or_default() + }; + + let res = sp_tracing::within_span!( + sp_tracing::Level::TRACE, "runtime::validate_transaction"; + { + if has_v2 { + runtime_api.validate_transaction(&at, source, uxt) + } else { + #[allow(deprecated)] // old validate_transaction + runtime_api.validate_transaction_before_version_2(&at, uxt) + } + }); + + res.map_err(|e| Error::RuntimeApi(e.to_string())) + }) } impl FullChainApi diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 719059477e13e4010167ec015625fddd738aec59..1582eee5d9265d206c6ffa00f8c9ef6119118c35 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -6,11 +6,99 @@ The format is based on [Keep a Changelog]. ## Unreleased +## 2.0.0-rc6 -> 2.0.0 – two dot 😮 + +Runtime +------- + +* Rename `ModuleToIndex` to `PalletRuntimeSetup` (#7148) +* Bounties (#5715) +* pallet-collective: allow customized default vote (#6984) +* add instantiable support for treasury pallet (#7058) +* frame/authority-discovery: Have authorities() return both current and next (#6788) +* add generated weight info for pallet-collective (#6789) +* Support Staking Payout to Any Account (#6832) +* Time-delay proxies (#6770) +* Refcounts are now u32 (#7164) + Client ------ +* Rename `inspect-key` to `inspect` (#7160) +* Send import notification always for re-orgs (#7118) +* Allow remotes to not open a legacy substream (#7075) +* Fix `storage::read` (#7084) +* Support hex encoded secret key for `--node-key` (#7052) +* Update the service tasks Grafana dashboard (#7038) +* manual seal is now consensus agnostic (#7010) +* Move subcommands from sc-cli to nodes (#6948) +* Implement request-responses protocols (#6634) +* fix bench db wipe (#6965) +* Fix benchmark read/write key tracker for keys in child storages. (#6905) +* *: Update to next libp2p version 0.24.0 (#6891) + +API +--- + +* grandpa-rpc: use FinalityProofProvider to check finality for rpc (#6215) +* pow: replace the thread-base mining loop with a future-based mining worker (#7060) +* Tracing for wasm with bridging to native (#6916) +* Frame-support storage: make iterations and translate consistent (#5470) +* pow: support uniform tie breaking in fork choice (#7073) +* Make decoding of `compact` saturating instead of invalid (#7062) +* Set reserved nodes with offchain worker. 
(#6996) +* client/*: Treat protocol name as str and not [u8] (#6967) +* Add a `LightSyncState` field to the chain spec (#6894) +* *: Update to next libp2p version 0.24.0 (#6891) + +Runtime Migrations +------------------ + +* Time-delay proxies (#6770) + + +## 2.0.0-rc5 -> 2.0.0-rc6 – Rock Hyrax + +Runtime +------- + +* Custom Codec Implenetation for NPoS Election (#6720) +* Successful `note_imminent_preimage` is free (#6793) +* pallet-democracy use of weightinfo (#6783) +* Update Balances Pallet to use `WeightInfo` (#6610) +* pallet-evm: add builtin support for the four basic Ethereum precompiles (#6743) +* Allow `PostDispatchInfo` to disable fees (#6749) +* pallet-evm: add support for tuple-based precompile declarations (#6681) +* grandpa: allow noting that the set has stalled (#6725) + +Client +------ + +* Merge Subkey into sc-cli (#4954) +* RpcHandlers Refactorings (#6846) +* client/authority-discovery: Introduce AuthorityDiscoveryService (#6760) +* Implement tracing::Event handling & parent_id for spans and events (#6672) +* Move to upstream wasmtime, refactor globals snapshot (#6759) +* Revalidate transactions only on latest best block (#6824) +* Allow task manager to have children (#6771) +* client/network: Expose DHT query duration to Prometheus (#6784) +* client/network: Add peers to DHT only if protocols match (#6549) +* Name all the tasks! (#6726) * Child nodes can be handled by adding a child `TaskManager` to the parent's `TaskManager` (#6771) +API +--- + +* pow: add access to pre-digest for algorithm verifiers (#6900) +* babe, aura, pow: only call check_inherents if authoring version is compatible (#6862) +* Implement 'transactional' annotation for runtime functions. (#6763) +* seal: Change prefix and module name from "ext_" to "seal_" for contract callable functions (#6798) +* Add Subscription RPC for Grandpa Finality (#5732) +* seal: Fix and improve error reporting (#6773) +* Allow blacklisting blocks from being finalized again after block revert (#6301) +* BABE slot and epoch event notifications (#6563) +* Add `memory-tracker` feature to `sp-trie` to fix wasm panic (#6745) + ## 2.0.0-rc4 -> 2.0.0-rc5 – River Dolphin Runtime diff --git a/docs/README.adoc b/docs/README.adoc index d1daeed07b5dc2c47bc934d4265d09d35bc48ea9..7f3d50faac7d61b471637050f14df1911f5dc207 100644 --- a/docs/README.adoc +++ b/docs/README.adoc @@ -318,10 +318,10 @@ we support multiple environment variables: for `cargo check` runs. * `WASM_BUILD_TYPE` - Sets the build type for building WASM binaries. Supported values are `release` or `debug`. By default the build type is equal to the build type used by the main build. -* `TRIGGER_WASM_BUILD` - Can be set to trigger a WASM build. On subsequent calls the value of the variable - needs to change. As WASM builder instructs `cargo` to watch for file changes - this environment variable should only be required in certain circumstances. -* `WASM_TARGET_DIRECTORY` - Will copy any build WASM binary to the given directory. The path needs +* `FORCE_WASM_BUILD` - Can be set to force a WASM build. On subsequent calls the value of the variable + needs to change. As WASM builder instructs `cargo` to watch for file changes + this environment variable should only be required in certain circumstances. +* `WASM_TARGET_DIRECTORY` - Will copy release build WASM binary to the given directory. The path needs to be absolute. * `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm binary. 
* `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index 47bf0cecf21172389ca1307b19dcd97d251d036c..d1742e567cfac2d56c3a5bf02763dd347afb13f9 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-assets" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME asset management pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,16 +16,16 @@ targets = ["x86_64-unknown-linux-gnu"] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } # Needed for various traits. In our case, `OnFinalize`. -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } # Needed for type-safe access to storage DB. -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } # `system` module provides us with all sorts of useful stuff and macros depend on it being around. -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc5", path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-std = { version = "2.0.0", path = "../../primitives/std" } +sp-io = { version = "2.0.0", path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/assets/README.md b/frame/assets/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6b3fe21e52775b05a8e63f49c6ecd4e33b6a77f5 --- /dev/null +++ b/frame/assets/README.md @@ -0,0 +1,116 @@ +# Assets Module + +A simple, secure module for dealing with fungible assets. + +## Overview + +The Assets module provides functionality for asset management of fungible asset classes +with a fixed supply, including: + +* Asset Issuance +* Asset Transfer +* Asset Destruction + +To use it in your runtime, you need to implement the assets [`Trait`](https://docs.rs/pallet-assets/latest/pallet_assets/trait.Trait.html). + +The supported dispatchable functions are documented in the [`Call`](https://docs.rs/pallet-assets/latest/pallet_assets/enum.Call.html) enum. + +### Terminology + +* **Asset issuance:** The creation of a new asset, whose total supply will belong to the + account that issues the asset. +* **Asset transfer:** The action of transferring assets from one account to another. +* **Asset destruction:** The process of an account removing its entire holding of an asset. +* **Fungible asset:** An asset whose units are interchangeable. +* **Non-fungible asset:** An asset for which each unit has unique characteristics. + +### Goals + +The assets system in Substrate is designed to make the following possible: + +* Issue a unique asset to its creator's account. +* Move assets between accounts. 
+* Remove an account's balance of an asset when requested by that account's owner and update + the asset's total supply. + +## Interface + +### Dispatchable Functions + +* `issue` - Issues the total supply of a new fungible asset to the account of the caller of the function. +* `transfer` - Transfers an `amount` of units of fungible asset `id` from the balance of +the function caller's account (`origin`) to a `target` account. +* `destroy` - Destroys the entire holding of a fungible asset `id` associated with the account +that called the function. + +Please refer to the [`Call`](https://docs.rs/pallet-assets/latest/pallet_assets/enum.Call.html) enum and its associated variants for documentation on each function. + +### Public Functions + + +* `balance` - Get the asset `id` balance of `who`. +* `total_supply` - Get the total supply of an asset `id`. + +Please refer to the [`Module`](https://docs.rs/pallet-assets/latest/pallet_assets/struct.Module.html) struct for details on publicly available functions. + +## Usage + +The following example shows how to use the Assets module in your runtime by exposing public functions to: + +* Issue a new fungible asset for a token distribution event (airdrop). +* Query the fungible asset holding balance of an account. +* Query the total supply of a fungible asset that has been issued. + +### Prerequisites + +Import the Assets module and types and derive your runtime's configuration traits from the Assets module trait. + +### Simple Code Snippet + +```rust +use pallet_assets as assets; +use frame_support::{decl_module, dispatch, ensure}; +use frame_system::ensure_signed; + +pub trait Trait: assets::Trait { } + +decl_module! { + pub struct Module for enum Call where origin: T::Origin { + pub fn issue_token_airdrop(origin) -> dispatch::DispatchResult { + let sender = ensure_signed(origin).map_err(|e| e.as_str())?; + + const ACCOUNT_ALICE: u64 = 1; + const ACCOUNT_BOB: u64 = 2; + const COUNT_AIRDROP_RECIPIENTS: u64 = 2; + const TOKENS_FIXED_SUPPLY: u64 = 100; + + ensure!(!COUNT_AIRDROP_RECIPIENTS.is_zero(), "Divide by zero error."); + + let asset_id = Self::next_asset_id(); + + >::mutate(|asset_id| *asset_id += 1); + >::insert((asset_id, &ACCOUNT_ALICE), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS); + >::insert((asset_id, &ACCOUNT_BOB), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS); + >::insert(asset_id, TOKENS_FIXED_SUPPLY); + + Self::deposit_event(RawEvent::Issued(asset_id, sender, TOKENS_FIXED_SUPPLY)); + Ok(()) + } + } +} +``` + +## Assumptions + +Below are assumptions that must be held when using this module. If any of +them are violated, the behavior of this module is undefined. + +* The total count of assets should be less than + `Trait::AssetId::max_value()`. + +## Related Modules + +* [`System`](https://docs.rs/frame-system/latest/frame_system/) +* [`Support`](https://docs.rs/frame-support/latest/frame_support/) + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 79bc9136ef4a70d737d2a4e59fe6448c58c971af..e5ad2ae352eb8b73bd4322e922e4e4c63e470b5e 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -230,11 +230,11 @@ decl_event! { ::Balance, ::AssetId, { - /// Some assets were issued. [asset_id, owner, total_supply] + /// Some assets were issued. \[asset_id, owner, total_supply\] Issued(AssetId, AccountId, Balance), - /// Some assets were transferred. [asset_id, from, to, amount] + /// Some assets were transferred. 
\[asset_id, from, to, amount\] Transferred(AssetId, AccountId, AccountId, Balance), - /// Some assets were destroyed. [asset_id, owner, balance] + /// Some assets were destroyed. \[asset_id, owner, balance\] Destroyed(AssetId, AccountId, Balance), } } @@ -319,7 +319,7 @@ mod tests { type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); diff --git a/frame/atomic-swap/Cargo.toml b/frame/atomic-swap/Cargo.toml index 704d22ba780d5f74b3a01a72df9176dfe4df1bcb..a65632289426bafd5e46ed76f4ac0e3555f653dc 100644 --- a/frame/atomic-swap/Cargo.toml +++ b/frame/atomic-swap/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-atomic-swap" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME atomic swap pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,15 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } [dev-dependencies] -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +pallet-balances = { version = "2.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/atomic-swap/README.md b/frame/atomic-swap/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1287e90bc0da54c93dd6e3056d986c4c00a04ea2 --- /dev/null +++ b/frame/atomic-swap/README.md @@ -0,0 +1,23 @@ +# Atomic Swap + +A module for atomically sending funds. + +- [`atomic_swap::Trait`](https://docs.rs/pallet-atomic-swap/latest/pallet_atomic_swap/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-atomic-swap/latest/pallet_atomic_swap/enum.Call.html) +- [`Module`](https://docs.rs/pallet-atomic-swap/latest/pallet_atomic_swap/struct.Module.html) + +## Overview + +A module for atomically sending funds from an origin to a target. A proof +is used to allow the target to approve (claim) the swap. If the swap is not +claimed within a specified duration of time, the sender may cancel it. 
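The atomic-swap overview above is easiest to see with a tiny standalone sketch of the hashed-proof flow it describes: the target claims by revealing a preimage that hashes to the registered proof, and the sender may cancel only once the agreed duration has elapsed unclaimed. This is an editorial illustration, not the pallet's API; the struct, the field names, and the toy `hash` function (a stand-in for a cryptographic hash such as blake2_256) are all hypothetical.

```rust
// Editorial sketch of the flow described above; not the pallet's API.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Toy stand-in for a cryptographic hash (the real pallet uses blake2_256).
fn hash(preimage: &[u8]) -> u64 {
    let mut h = DefaultHasher::new();
    preimage.hash(&mut h);
    h.finish()
}

struct PendingSwap {
    sender: &'static str,
    target: &'static str,
    hashed_proof: u64,
    cancellable_after: u32, // block number after which the sender may cancel
}

impl PendingSwap {
    /// The target approves (claims) the swap by revealing the preimage of the proof.
    fn claim(&self, who: &str, preimage: &[u8]) -> bool {
        who == self.target && hash(preimage) == self.hashed_proof
    }

    /// The sender may cancel once the specified duration has elapsed unclaimed.
    fn cancel(&self, who: &str, now: u32) -> bool {
        who == self.sender && now > self.cancellable_after
    }
}

fn main() {
    let secret = b"swap-preimage";
    let swap = PendingSwap {
        sender: "alice",
        target: "bob",
        hashed_proof: hash(secret),
        cancellable_after: 100,
    };
    assert!(swap.claim("bob", secret)); // valid claim with the right preimage
    assert!(!swap.cancel("alice", 50)); // too early for the sender to cancel
    assert!(swap.cancel("alice", 101)); // cancellable once the duration passed
}
```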
+ +## Interface + +### Dispatchable Functions + +* `create_swap` - called by a sender to register a new atomic swap +* `claim_swap` - called by the target to approve a swap +* `cancel_swap` - may be called by a sender after a specified duration + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs index 65794792d0aa4ca1368b825e94e2c7eda49c4e7c..31f0c0f42652519a4cda25ac5cec1a21279204d8 100644 --- a/frame/atomic-swap/src/lib.rs +++ b/frame/atomic-swap/src/lib.rs @@ -189,12 +189,12 @@ decl_event!( AccountId = ::AccountId, PendingSwap = PendingSwap, { - /// Swap created. [account, proof, swap] + /// Swap created. \[account, proof, swap\] NewSwap(AccountId, HashedProof, PendingSwap), /// Swap claimed. The last parameter indicates whether the execution succeeds. - /// [account, proof, success] + /// \[account, proof, success\] SwapClaimed(AccountId, HashedProof, bool), - /// Swap cancelled. [account, proof] + /// Swap cancelled. \[account, proof\] SwapCancelled(AccountId, HashedProof), } ); diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index 6690a24d364d6eaf9a3f34f9990bc4106584dfaa..060411c8815da66996a57661233777cc73efa75a 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -45,7 +45,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -55,6 +55,7 @@ parameter_types! { pub const ExistentialDeposit: u64 = 1; } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u64; type DustRemoval = (); type Event = (); diff --git a/frame/aura/Cargo.toml b/frame/aura/Cargo.toml index cc9b1bf6f6602989c8d7befcc1ab61070cda5bc0..27a579e0f9f8b14b725ebea23cdd6140ff2f91f7 100644 --- a/frame/aura/Cargo.toml +++ b/frame/aura/Cargo.toml @@ -1,34 +1,35 @@ [package] name = "pallet-aura" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME AURA consensus pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } +sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } +sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0-rc5", default-features = false, path = "../session" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -sp-consensus-aura = { version = 
"0.8.0-rc5", path = "../../primitives/consensus/aura", default-features = false } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/timestamp" } -pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../timestamp" } +pallet-session = { version = "2.0.0", default-features = false, path = "../session" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +sp-consensus-aura = { version = "0.8.0", path = "../../primitives/consensus/aura", default-features = false } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-timestamp = { version = "2.0.0", default-features = false, path = "../../primitives/timestamp" } +pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-io ={ version = "2.0.0-rc5", path = "../../primitives/io" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-io ={ version = "2.0.0", path = "../../primitives/io" } lazy_static = "1.4.0" parking_lot = "0.10.0" diff --git a/frame/aura/README.md b/frame/aura/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4f3eacbad8a061114309d642c7346acc829393a8 --- /dev/null +++ b/frame/aura/README.md @@ -0,0 +1,28 @@ +# Aura Module + +- [`aura::Trait`](https://docs.rs/pallet-aura/latest/pallet_aura/trait.Trait.html) +- [`Module`](https://docs.rs/pallet-aura/latest/pallet_aura/struct.Module.html) + +## Overview + +The Aura module extends Aura consensus by managing offline reporting. + +## Interface + +### Public Functions + +- `slot_duration` - Determine the Aura slot-duration based on the Timestamp module configuration. + +## Related Modules + +- [Timestamp](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/): The Timestamp module is used in Aura to track +consensus rounds (via `slots`). + +## References + +If you're interested in hacking on this module, it is useful to understand the interaction with +`substrate/primitives/inherents/src/lib.rs` and, specifically, the required implementation of +[`ProvideInherent`](https://docs.rs/sp-inherents/latest/sp_inherents/trait.ProvideInherent.html) and +[`ProvideInherentData`](https://docs.rs/sp-inherents/latest/sp_inherents/trait.ProvideInherentData.html) to create and check inherents. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index 9277cb14f3002487197f6073357552dd009104ee..a3875727e47c20112d728930b3a1618293284783 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -66,7 +66,7 @@ impl frame_system::Trait for Test { type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); diff --git a/frame/authority-discovery/Cargo.toml b/frame/authority-discovery/Cargo.toml index bfd7017d06f3021b94c2a11fcd21fffe5d7a96b8..0e1db74632786b71c5ed67ea1773eb4176d5004d 100644 --- a/frame/authority-discovery/Cargo.toml +++ b/frame/authority-discovery/Cargo.toml @@ -1,31 +1,32 @@ [package] name = "pallet-authority-discovery" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for authority discovery" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-authority-discovery = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/authority-discovery" } -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } +sp-authority-discovery = { version = "2.0.0", default-features = false, path = "../../primitives/authority-discovery" } +sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0-rc5", features = ["historical" ], path = "../session", default-features = false } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +pallet-session = { version = "2.0.0", features = ["historical" ], path = "../session", default-features = false } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/staking" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-io = { version = "2.0.0", path = "../../primitives/io" } +sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } [features] default = ["std"] diff --git a/frame/authority-discovery/README.md b/frame/authority-discovery/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..9a534dcbeb6f8f98ec2260176ca4075f81179194 --- /dev/null +++ b/frame/authority-discovery/README.md @@ -0,0 +1,6 @@ +# Authority discovery module. + +This module is used by the `client/authority-discovery` to retrieve the +current set of authorities. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index 55e32b21dcb94ccf152cbe5c70c8ff0d222c314c..09be533474fca225c0055747f1713d62274c85a5 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -23,7 +23,7 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; +use sp_std::{collections::btree_set::BTreeSet, prelude::*}; use frame_support::{decl_module, decl_storage}; use sp_authority_discovery::AuthorityId; @@ -32,7 +32,7 @@ pub trait Trait: frame_system::Trait + pallet_session::Trait {} decl_storage! { trait Store for Module as AuthorityDiscovery { - /// Keys of the current authority set. + /// Keys of the current and next authority set. Keys get(fn keys): Vec; } add_extra_genesis { @@ -47,7 +47,7 @@ decl_module! { } impl Module { - /// Retrieve authority identifiers of the current authority set. + /// Retrieve authority identifiers of the current and next authority set. pub fn authorities() -> Vec { Keys::get() } @@ -71,17 +71,17 @@ impl pallet_session::OneSessionHandler for Module { where I: Iterator, { - let keys = authorities.map(|x| x.1).collect::>(); - Self::initialize_keys(&keys); + Self::initialize_keys(&authorities.map(|x| x.1).collect::>()); } - fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I) + fn on_new_session<'a, I: 'a>(changed: bool, validators: I, queued_validators: I) where I: Iterator, { - // Remember who the authorities are for the new session. + // Remember who the authorities are for the new and next session. if changed { - Keys::put(validators.map(|x| x.1).collect::>()); + let keys = validators.chain(queued_validators).map(|x| x.1).collect::>(); + Keys::put(keys.into_iter().collect::>()); } } @@ -164,7 +164,7 @@ mod tests { type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); @@ -192,12 +192,13 @@ mod tests { } #[test] - fn authorities_returns_current_authority_set() { - // The whole authority discovery module ignores account ids, but we still need it for - // `pallet_session::OneSessionHandler::on_new_session`, thus its safe to use the same value everywhere. + fn authorities_returns_current_and_next_authority_set() { + // The whole authority discovery module ignores account ids, but we still need them for + // `pallet_session::OneSessionHandler::on_new_session`, thus its safe to use the same value + // everywhere. let account_id = AuthorityPair::from_seed_slice(vec![10; 32].as_ref()).unwrap().public(); - let first_authorities: Vec = vec![0, 1].into_iter() + let mut first_authorities: Vec = vec![0, 1].into_iter() .map(|i| AuthorityPair::from_seed_slice(vec![i; 32].as_ref()).unwrap().public()) .map(AuthorityId::from) .collect(); @@ -206,12 +207,21 @@ mod tests { .map(|i| AuthorityPair::from_seed_slice(vec![i; 32].as_ref()).unwrap().public()) .map(AuthorityId::from) .collect(); - // Needed for `pallet_session::OneSessionHandler::on_new_session`. 
- let second_authorities_and_account_ids: Vec<(&AuthorityId, AuthorityId)> = second_authorities.clone() + let second_authorities_and_account_ids = second_authorities.clone() .into_iter() .map(|id| (&account_id, id)) + .collect:: >(); + + let mut third_authorities: Vec = vec![4, 5].into_iter() + .map(|i| AuthorityPair::from_seed_slice(vec![i; 32].as_ref()).unwrap().public()) + .map(AuthorityId::from) .collect(); + // Needed for `pallet_session::OneSessionHandler::on_new_session`. + let third_authorities_and_account_ids = third_authorities.clone() + .into_iter() + .map(|id| (&account_id, id)) + .collect:: >(); // Build genesis. let mut t = frame_system::GenesisConfig::default() @@ -233,23 +243,55 @@ mod tests { AuthorityDiscovery::on_genesis_session( first_authorities.iter().map(|id| (id, id.clone())) ); - assert_eq!(first_authorities, AuthorityDiscovery::authorities()); + first_authorities.sort(); + let mut authorities_returned = AuthorityDiscovery::authorities(); + authorities_returned.sort(); + assert_eq!(first_authorities, authorities_returned); // When `changed` set to false, the authority set should not be updated. AuthorityDiscovery::on_new_session( false, second_authorities_and_account_ids.clone().into_iter(), - vec![].into_iter(), + third_authorities_and_account_ids.clone().into_iter(), + ); + let mut authorities_returned = AuthorityDiscovery::authorities(); + authorities_returned.sort(); + assert_eq!( + first_authorities, + authorities_returned, + "Expected authority set not to change as `changed` was set to false.", ); - assert_eq!(first_authorities, AuthorityDiscovery::authorities()); // When `changed` set to true, the authority set should be updated. AuthorityDiscovery::on_new_session( true, second_authorities_and_account_ids.into_iter(), - vec![].into_iter(), + third_authorities_and_account_ids.clone().into_iter(), + ); + let mut second_and_third_authorities = second_authorities.iter() + .chain(third_authorities.iter()) + .cloned() + .collect::>(); + second_and_third_authorities.sort(); + assert_eq!( + second_and_third_authorities, + AuthorityDiscovery::authorities(), + "Expected authority set to contain both the authorities of the new as well as the \ + next session." + ); + + // With overlapping authority sets, `authorities()` should return a deduplicated set. + AuthorityDiscovery::on_new_session( + true, + third_authorities_and_account_ids.clone().into_iter(), + third_authorities_and_account_ids.clone().into_iter(), + ); + third_authorities.sort(); + assert_eq!( + third_authorities, + AuthorityDiscovery::authorities(), + "Expected authority set to be deduplicated." 
); - assert_eq!(second_authorities, AuthorityDiscovery::authorities()); }); } } diff --git a/frame/authorship/Cargo.toml b/frame/authorship/Cargo.toml index 9ddd24888ca986bc04f3dbb6318c0fdb8f949103..e8b6444583821cfc2705cb4b19965a3dd606b4c1 100644 --- a/frame/authorship/Cargo.toml +++ b/frame/authorship/Cargo.toml @@ -1,29 +1,30 @@ [package] name = "pallet-authorship" -version = "2.0.0-rc5" +version = "2.0.0" description = "Block and Uncle Author tracking for the FRAME" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -sp-authorship = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/authorship" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } +sp-authorship = { version = "2.0.0", default-features = false, path = "../../primitives/authorship" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-io ={ version = "2.0.0-rc5", path = "../../primitives/io" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-io ={ version = "2.0.0", path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/authorship/README.md b/frame/authorship/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d61747da3e101b4989b517a77c166cf98cbce6fe --- /dev/null +++ b/frame/authorship/README.md @@ -0,0 +1,5 @@ +Authorship tracking for FRAME runtimes. + +This tracks the current author of the block and recent uncles. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 91cad247cac578247104075f27cb7ffaa4ac1135..0a10c8849571b4477daf2d6eea59db7338841c23 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -438,7 +438,7 @@ mod tests { type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index e2577e8daf11e4e06707654fba6f41bfd9e02ff1..a210a2a8ef06a35198056fd094ec6febcb45359b 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -1,43 +1,44 @@ [package] name = "pallet-babe" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Consensus extension module for BABE consensus. Collects on-chain randomness from VRF outputs and manages epoch transitions." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-authorship = { version = "2.0.0-rc5", default-features = false, path = "../authorship" } -pallet-session = { version = "2.0.0-rc5", default-features = false, path = "../session" } -pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../timestamp" } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +pallet-authorship = { version = "2.0.0", default-features = false, path = "../authorship" } +pallet-session = { version = "2.0.0", default-features = false, path = "../session" } +pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } serde = { version = "1.0.101", optional = true } -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-babe = { version = "0.8.0-rc5", default-features = false, path = "../../primitives/consensus/babe" } -sp-consensus-vrf = { version = "0.8.0-rc5", default-features = false, path = "../../primitives/consensus/vrf" } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-session = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/session" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/staking" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-timestamp = { version = "2.0.0-rc5", 
default-features = false, path = "../../primitives/timestamp" } +sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } +sp-consensus-babe = { version = "0.8.0", default-features = false, path = "../../primitives/consensus/babe" } +sp-consensus-vrf = { version = "0.8.0", default-features = false, path = "../../primitives/consensus/vrf" } +sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-session = { version = "2.0.0", default-features = false, path = "../../primitives/session" } +sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-timestamp = { version = "2.0.0", default-features = false, path = "../../primitives/timestamp" } [dev-dependencies] -frame-benchmarking = { version = "2.0.0-rc5", path = "../benchmarking" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -pallet-offences = { version = "2.0.0-rc5", path = "../offences" } -pallet-staking = { version = "2.0.0-rc5", path = "../staking" } -pallet-staking-reward-curve = { version = "2.0.0-rc5", path = "../staking/reward-curve" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +frame-benchmarking = { version = "2.0.0", path = "../benchmarking" } +pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-offences = { version = "2.0.0", path = "../offences" } +pallet-staking = { version = "2.0.0", path = "../staking" } +pallet-staking-reward-curve = { version = "2.0.0", path = "../staking/reward-curve" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/babe/README.md b/frame/babe/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6f20be89efc0cd57bcf4ab1a082f1f5198e42fa7 --- /dev/null +++ b/frame/babe/README.md @@ -0,0 +1,4 @@ +Consensus extension module for BABE consensus. Collects on-chain randomness +from VRF outputs and manages epoch transitions. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/babe/src/default_weights.rs b/frame/babe/src/default_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..a0e13781961cc0a5ad37c8d5b50801542a582657 --- /dev/null +++ b/frame/babe/src/default_weights.rs @@ -0,0 +1,47 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Default weights for the Babe Pallet +//! This file was not auto-generated. 
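// (Editorial note, not part of the patch.) A worked example of the formula
// implemented below, assuming the usual weight constants of this release
// (WEIGHT_PER_MICROS = 10^6, WEIGHT_PER_NANOS = 10^3 weight units) plus the
// RocksDbWeight read/write costs, so the magnitudes are easy to follow:
//
//   validator_count = 100 (the floor), MAX_NOMINATORS = 200
//   membership-proof check:   35 us + 100 * 175 ns   = 52.5 us
//   equivocation-proof check:                        + 110  us
//   offence report:           110 us + 200 * 25 us   = 5_110 us
//   plus the database weight of 5 + (14 + 3*200) reads and (10 + 3*200) writes.
//
// Beyond 100 validators the weight grows by a further 175 ns per additional
// validator, consistent with what `report_equivocation_has_valid_weight`
// asserts in the pallet's tests (flat up to 100 validators, strictly
// increasing afterwards).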
+ +use frame_support::weights::{ + Weight, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS, RocksDbWeight as DbWeight}, +}; + +impl crate::WeightInfo for () { + fn report_equivocation(validator_count: u32) -> Weight { + // we take the validator set count from the membership proof to + // calculate the weight but we set a floor of 100 validators. + let validator_count = validator_count.max(100) as u64; + + // worst case we are considering is that the given offender + // is backed by 200 nominators + const MAX_NOMINATORS: u64 = 200; + + // checking membership proof + (35 * WEIGHT_PER_MICROS) + .saturating_add((175 * WEIGHT_PER_NANOS).saturating_mul(validator_count)) + .saturating_add(DbWeight::get().reads(5)) + // check equivocation proof + .saturating_add(110 * WEIGHT_PER_MICROS) + // report offence + .saturating_add(110 * WEIGHT_PER_MICROS) + .saturating_add(25 * WEIGHT_PER_MICROS * MAX_NOMINATORS) + .saturating_add(DbWeight::get().reads(14 + 3 * MAX_NOMINATORS)) + .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)) + } +} diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index f80ac186434d0796387439899f8993eaadaa41f5..3f13fe7e03002c8a692460d34d0b7d8da29d524d 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -24,8 +24,9 @@ use codec::{Decode, Encode}; use frame_support::{ decl_error, decl_module, decl_storage, + dispatch::DispatchResultWithPostInfo, traits::{FindAuthor, Get, KeyOwnerProofSystem, Randomness as RandomnessT}, - weights::Weight, + weights::{Pays, Weight}, Parameter, }; use frame_system::{ensure_none, ensure_signed}; @@ -50,6 +51,7 @@ use sp_inherents::{InherentData, InherentIdentifier, MakeFatalError, ProvideInhe pub use sp_consensus_babe::{AuthorityId, PUBLIC_KEY_LENGTH, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH}; mod equivocation; +mod default_weights; #[cfg(any(feature = "runtime-benchmarks", test))] mod benchmarking; @@ -101,6 +103,12 @@ pub trait Trait: pallet_timestamp::Trait { /// `()`) you must use this pallet's `ValidateUnsigned` in the runtime /// definition. type HandleEquivocation: HandleEquivocation; + + type WeightInfo: WeightInfo; +} + +pub trait WeightInfo { + fn report_equivocation(validator_count: u32) -> Weight; } /// Trigger an epoch change, if any should take place. @@ -255,19 +263,19 @@ decl_module! { /// the equivocation proof and validate the given key ownership proof /// against the extracted offender. If both are valid, the offence will /// be reported. - #[weight = weight::weight_for_report_equivocation::()] + #[weight = ::WeightInfo::report_equivocation(key_owner_proof.validator_count())] fn report_equivocation( origin, equivocation_proof: EquivocationProof, key_owner_proof: T::KeyOwnerProof, - ) { + ) -> DispatchResultWithPostInfo { let reporter = ensure_signed(origin)?; Self::do_report_equivocation( Some(reporter), equivocation_proof, key_owner_proof, - )?; + ) } /// Report authority equivocation/misbehavior. This method will verify @@ -278,44 +286,23 @@ decl_module! { /// block authors will call it (validated in `ValidateUnsigned`), as such /// if the block author is defined it will be defined as the equivocation /// reporter. 
- #[weight = weight::weight_for_report_equivocation::()] + #[weight = ::WeightInfo::report_equivocation(key_owner_proof.validator_count())] fn report_equivocation_unsigned( origin, equivocation_proof: EquivocationProof, key_owner_proof: T::KeyOwnerProof, - ) { + ) -> DispatchResultWithPostInfo { ensure_none(origin)?; Self::do_report_equivocation( T::HandleEquivocation::block_author(), equivocation_proof, key_owner_proof, - )?; + ) } } } -mod weight { - use frame_support::{ - traits::Get, - weights::{constants::WEIGHT_PER_MICROS, Weight}, - }; - - pub fn weight_for_report_equivocation() -> Weight { - // checking membership proof - (35 * WEIGHT_PER_MICROS) - .saturating_add(T::DbWeight::get().reads(5)) - // check equivocation proof - .saturating_add(110 * WEIGHT_PER_MICROS) - // report offence - .saturating_add(110 * WEIGHT_PER_MICROS) - // worst case we are considering is that the given offender - // is backed by 200 nominators - .saturating_add(T::DbWeight::get().reads(14 + 3 * 200)) - .saturating_add(T::DbWeight::get().writes(10 + 3 * 200)) - } -} - impl RandomnessT<::Hash> for Module { /// Some BABE blocks have VRF outputs where the block producer has exactly one bit of influence, /// either they make the block or they do not make the block and thus someone else makes the @@ -626,13 +613,13 @@ impl Module { reporter: Option, equivocation_proof: EquivocationProof, key_owner_proof: T::KeyOwnerProof, - ) -> Result<(), Error> { + ) -> DispatchResultWithPostInfo { let offender = equivocation_proof.offender.clone(); let slot_number = equivocation_proof.slot_number; // validate the equivocation proof if !sp_consensus_babe::check_equivocation_proof(equivocation_proof) { - return Err(Error::InvalidEquivocationProof.into()); + return Err(Error::::InvalidEquivocationProof.into()); } let validator_set_count = key_owner_proof.validator_count(); @@ -644,13 +631,13 @@ impl Module { // check that the slot number is consistent with the session index // in the key ownership proof (i.e. slot is for that epoch) if epoch_index != session_index { - return Err(Error::InvalidKeyOwnershipProof.into()); + return Err(Error::::InvalidKeyOwnershipProof.into()); } // check the membership proof and extract the offender's id let key = (sp_consensus_babe::KEY_TYPE, offender); let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof) - .ok_or(Error::InvalidKeyOwnershipProof)?; + .ok_or(Error::::InvalidKeyOwnershipProof)?; let offence = BabeEquivocationOffence { slot: slot_number, @@ -665,9 +652,10 @@ impl Module { }; T::HandleEquivocation::report_offence(reporters, offence) - .map_err(|_| Error::DuplicateOffenceReport)?; + .map_err(|_| Error::::DuplicateOffenceReport)?; - Ok(()) + // waive the fee since the report is valid and beneficial + Ok(Pays::No.into()) } /// Submits an extrinsic to report an equivocation. This method will create diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 8a0356d8da7e8596dec5b155522fd0f7b5e78a43..3adad1c4bf794d9d8d11c9ebc93424f4e3dc1b0f 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -86,7 +86,7 @@ impl frame_system::Trait for Test { type MaximumExtrinsicWeight = MaximumBlockWeight; type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -152,6 +152,7 @@ parameter_types! 
{ } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u128; type DustRemoval = (); type Event = (); @@ -229,7 +230,6 @@ impl pallet_offences::Trait for Test { type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; - type WeightInfo = (); } impl Trait for Test { @@ -248,6 +248,7 @@ impl Trait for Test { )>>::IdentificationTuple; type HandleEquivocation = super::EquivocationHandler; + type WeightInfo = (); } pub type Balances = pallet_balances::Module; diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index bdd6748c3b3516c33c3b3db1420a5cda7b07f1bc..6cfa7e41dff88af8b8f4036922bfa9cbb2ea93ff 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -21,6 +21,7 @@ use super::{Call, *}; use frame_support::{ assert_err, assert_ok, traits::{Currency, OnFinalize}, + weights::{GetDispatchInfo, Pays}, }; use mock::*; use pallet_session::ShouldEndSession; @@ -585,3 +586,84 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { ); }); } + +#[test] +fn report_equivocation_has_valid_weight() { + // the weight depends on the size of the validator set, + // but there's a lower bound of 100 validators. + assert!( + (1..=100) + .map(::WeightInfo::report_equivocation) + .collect::>() + .windows(2) + .all(|w| w[0] == w[1]) + ); + + // after 100 validators the weight should keep increasing + // with every extra validator. + assert!( + (100..=1000) + .map(::WeightInfo::report_equivocation) + .collect::>() + .windows(2) + .all(|w| w[0] < w[1]) + ); +} + +#[test] +fn valid_equivocation_reports_dont_pay_fees() { + let (pairs, mut ext) = new_test_ext_with_pairs(3); + + ext.execute_with(|| { + start_era(1); + + let offending_authority_pair = &pairs[0]; + + // generate an equivocation proof. + let equivocation_proof = + generate_equivocation_proof(0, &offending_authority_pair, CurrentSlot::get()); + + // create the key ownership proof. + let key_owner_proof = Historical::prove(( + sp_consensus_babe::KEY_TYPE, + &offending_authority_pair.public(), + )) + .unwrap(); + + // check the dispatch info for the call. + let info = Call::::report_equivocation_unsigned( + equivocation_proof.clone(), + key_owner_proof.clone(), + ) + .get_dispatch_info(); + + // it should have non-zero weight and the fee has to be paid. + assert!(info.weight > 0); + assert_eq!(info.pays_fee, Pays::Yes); + + // report the equivocation. + let post_info = Babe::report_equivocation_unsigned( + Origin::none(), + equivocation_proof.clone(), + key_owner_proof.clone(), + ) + .unwrap(); + + // the original weight should be kept, but given that the report + // is valid the fee is waived. + assert!(post_info.actual_weight.is_none()); + assert_eq!(post_info.pays_fee, Pays::No); + + // report the equivocation again which is invalid now since it is + // duplicate. + let post_info = + Babe::report_equivocation_unsigned(Origin::none(), equivocation_proof, key_owner_proof) + .err() + .unwrap() + .post_info; + + // the fee is not waived and the original weight is kept. 
+ assert!(post_info.actual_weight.is_none()); + assert_eq!(post_info.pays_fee, Pays::Yes); + }) +} diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index e6c8eec65ded29c8aedc1545c1ea34f93b890ad9..21c8abbc24a6c50491bab87feb244d59d8a3667d 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-balances" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet to manage balances" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,16 +15,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-transaction-payment = { version = "2.0.0-rc5", path = "../transaction-payment" } +sp-io = { version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +pallet-transaction-payment = { version = "2.0.0", path = "../transaction-payment" } [features] default = ["std"] diff --git a/frame/balances/README.md b/frame/balances/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4104fdc64197568fdc97f0141a1e84c2cffb05c8 --- /dev/null +++ b/frame/balances/README.md @@ -0,0 +1,122 @@ +# Balances Module + +The Balances module provides functionality for handling accounts and balances. + +- [`balances::Trait`](https://docs.rs/pallet-balances/latest/pallet_balances/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-balances/latest/pallet_balances/enum.Call.html) +- [`Module`](https://docs.rs/pallet-balances/latest/pallet_balances/struct.Module.html) + +## Overview + +The Balances module provides functions for: + +- Getting and setting free balances. +- Retrieving total, reserved and unreserved balances. +- Repatriating a reserved balance to a beneficiary account that exists. +- Transferring a balance between accounts (when not reserved). +- Slashing an account balance. +- Account creation and removal. +- Managing total issuance. +- Setting and managing locks. + +### Terminology + +- **Existential Deposit:** The minimum balance required to create or keep an account open. 
This prevents +"dust accounts" from filling storage. When the free plus the reserved balance (i.e. the total balance) + fall below this, then the account is said to be dead; and it loses its functionality as well as any + prior history and all information on it is removed from the chain's state. + No account should ever have a total balance that is strictly between 0 and the existential + deposit (exclusive). If this ever happens, it indicates either a bug in this module or an + erroneous raw mutation of storage. + +- **Total Issuance:** The total number of units in existence in a system. + +- **Reaping an account:** The act of removing an account by resetting its nonce. Happens after its +total balance has become zero (or, strictly speaking, less than the Existential Deposit). + +- **Free Balance:** The portion of a balance that is not reserved. The free balance is the only + balance that matters for most operations. + +- **Reserved Balance:** Reserved balance still belongs to the account holder, but is suspended. + Reserved balance can still be slashed, but only after all the free balance has been slashed. + +- **Imbalance:** A condition when some funds were credited or debited without equal and opposite accounting +(i.e. a difference between total issuance and account balances). Functions that result in an imbalance will +return an object of the `Imbalance` trait that can be managed within your runtime logic. (If an imbalance is +simply dropped, it should automatically maintain any book-keeping such as total issuance.) + +- **Lock:** A freeze on a specified amount of an account's free balance until a specified block number. Multiple +locks always operate over the same funds, so they "overlay" rather than "stack". + +### Implementations + +The Balances module provides implementations for the following traits. If these traits provide the functionality +that you need, then you can avoid coupling with the Balances module. + +- [`Currency`](https://docs.rs/frame-support/latest/frame_support/traits/trait.Currency.html): Functions for dealing with a +fungible assets system. +- [`ReservableCurrency`](https://docs.rs/frame-support/latest/frame_support/traits/trait.ReservableCurrency.html): +Functions for dealing with assets that can be reserved from an account. +- [`LockableCurrency`](https://docs.rs/frame-support/latest/frame_support/traits/trait.LockableCurrency.html): Functions for +dealing with accounts that allow liquidity restrictions. +- [`Imbalance`](https://docs.rs/frame-support/latest/frame_support/traits/trait.Imbalance.html): Functions for handling +imbalances between total issuance in the system and account balances. Must be used when a function +creates new funds (e.g. a reward) or destroys some funds (e.g. a system fee). +- [`IsDeadAccount`](https://docs.rs/frame-system/latest/frame_system/trait.IsDeadAccount.html): Determiner to say whether a +given account is unused. + +## Interface + +### Dispatchable Functions + +- `transfer` - Transfer some liquid free balance to another account. +- `set_balance` - Set the balances of a given account. The origin of this call must be root. + +## Usage + +The following examples show how to use the Balances module in your custom module. 
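Before the FRAME examples, here is a small standalone sketch (editorial, not the pallet's code) of the lock "overlay" semantics described under Terminology: the frozen amount is the maximum of the active locks, not their sum.

```rust
// Hypothetical illustration only; the real pallet tracks locks per withdraw
// reason and enforces them through the `LockableCurrency` trait.
struct Lock { id: [u8; 8], amount: u64 }

fn frozen(locks: &[Lock]) -> u64 {
    // Locks overlay: the largest lock determines how much is frozen.
    locks.iter().map(|l| l.amount).max().unwrap_or(0)
}

fn main() {
    let locks = [
        Lock { id: *b"staking ", amount: 30 },
        Lock { id: *b"democrac", amount: 50 },
    ];
    // Two locks of 30 and 50 freeze 50 in total, not 80.
    assert_eq!(frozen(&locks), 50);
}
```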
+ +### Examples from the FRAME + +The Contract module uses the `Currency` trait to handle gas payment, and its types inherit from `Currency`: + +```rust +use frame_support::traits::Currency; + +pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; + +``` + +The Staking module uses the `LockableCurrency` trait to lock a stash account's funds: + +```rust +use frame_support::traits::{WithdrawReasons, LockableCurrency}; +use sp_runtime::traits::Bounded; +pub trait Trait: frame_system::Trait { + type Currency: LockableCurrency; +} + +fn update_ledger( + controller: &T::AccountId, + ledger: &StakingLedger +) { + T::Currency::set_lock( + STAKING_ID, + &ledger.stash, + ledger.total, + WithdrawReasons::all() + ); + // >::insert(controller, ledger); // Commented out as we don't have access to Staking's storage here. +} +``` + +## Genesis config + +The Balances module depends on the [`GenesisConfig`](https://docs.rs/pallet-balances/latest/pallet_balances/struct.GenesisConfig.html). + +## Assumptions + +* Total issued balanced of all accounts should be less than `Trait::Balance::max_value()`. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index 73547fe814aa2d50a4e689cc4e26a8c70f864ff6..21f43c7c6364083ed7a8bc747e31e3b64d7b9dbd 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use sp_runtime::traits::Bounded; use crate::Module as Balances; @@ -40,7 +40,7 @@ benchmarks! { // * Transfer will create the recipient account. transfer { let existential_deposit = T::ExistentialDeposit::get(); - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); // Give some multiple of the existential deposit + creation fee + transfer fee let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); @@ -60,7 +60,7 @@ benchmarks! { // * Both accounts exist and will continue to exist. #[extra] transfer_best_case { - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); let recipient: T::AccountId = account("recipient", 0, SEED); let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); @@ -80,7 +80,7 @@ benchmarks! { // Benchmark `transfer_keep_alive` with the worst possible condition: // * The recipient account is created. transfer_keep_alive { - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); let recipient: T::AccountId = account("recipient", 0, SEED); let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index c6b43677f2e59c801b07d1642b580757bfc7d3c7..422e112bdf276e0509536f8d672dd3b0961e6ccd 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -177,7 +177,6 @@ use sp_runtime::{ }, }; use frame_system::{self as system, ensure_signed, ensure_root}; - pub use self::imbalances::{PositiveImbalance, NegativeImbalance}; pub trait WeightInfo { @@ -201,6 +200,10 @@ pub trait Subtrait: frame_system::Trait { /// Weight information for the extrinsics in this pallet. type WeightInfo: WeightInfo; + + /// The maximum number of locks that should exist on an account. + /// Not strictly enforced, but used for weight estimation. 
+ type MaxLocks: Get; } pub trait Trait: frame_system::Trait { @@ -222,6 +225,10 @@ pub trait Trait: frame_system::Trait { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + + /// The maximum number of locks that should exist on an account. + /// Not strictly enforced, but used for weight estimation. + type MaxLocks: Get; } impl, I: Instance> Subtrait for T { @@ -229,6 +236,7 @@ impl, I: Instance> Subtrait for T { type ExistentialDeposit = T::ExistentialDeposit; type AccountStore = T::AccountStore; type WeightInfo = >::WeightInfo; + type MaxLocks = T::MaxLocks; } decl_event!( @@ -236,24 +244,24 @@ decl_event!( ::AccountId, >::Balance { - /// An account was created with some free balance. [account, free_balance] + /// An account was created with some free balance. \[account, free_balance\] Endowed(AccountId, Balance), /// An account was removed whose balance was non-zero but below ExistentialDeposit, - /// resulting in an outright loss. [account, balance] + /// resulting in an outright loss. \[account, balance\] DustLost(AccountId, Balance), - /// Transfer succeeded. [from, to, value] + /// Transfer succeeded. \[from, to, value\] Transfer(AccountId, AccountId, Balance), - /// A balance was set by root. [who, free, reserved] + /// A balance was set by root. \[who, free, reserved\] BalanceSet(AccountId, Balance, Balance), - /// Some amount was deposited (e.g. for transaction fees). [who, deposit] + /// Some amount was deposited (e.g. for transaction fees). \[who, deposit\] Deposit(AccountId, Balance), - /// Some balance was reserved (moved from free to reserved). [who, value] + /// Some balance was reserved (moved from free to reserved). \[who, value\] Reserved(AccountId, Balance), - /// Some balance was unreserved (moved from reserved to free). [who, value] + /// Some balance was unreserved (moved from reserved to free). \[who, value\] Unreserved(AccountId, Balance), /// Some balance was moved from the reserve of the first account to the second account. /// Final argument indicates the destination balance type. - /// [from, to, balance, destination_status] + /// \[from, to, balance, destination_status\] ReserveRepatriated(AccountId, AccountId, Balance, Status), } ); @@ -664,6 +672,12 @@ impl, I: Instance> Module { /// Update the account entry for `who`, given the locks. fn update_locks(who: &T::AccountId, locks: &[BalanceLock]) { + if locks.len() as u32 > T::MaxLocks::get() { + frame_support::debug::warn!( + "Warning: A user has more currency locks than expected. \ + A runtime configuration adjustment may be needed." + ); + } Self::mutate_account(who, |b| { b.misc_frozen = Zero::zero(); b.fee_frozen = Zero::zero(); @@ -888,7 +902,7 @@ impl, I: Instance> frame_system::Trait for ElevatedTrait { type MaximumBlockLength = T::MaximumBlockLength; type AvailableBlockRatio = T::AvailableBlockRatio; type Version = T::Version; - type ModuleToIndex = T::ModuleToIndex; + type PalletInfo = T::PalletInfo; type OnNewAccount = T::OnNewAccount; type OnKilledAccount = T::OnKilledAccount; type AccountData = T::AccountData; @@ -901,6 +915,7 @@ impl, I: Instance> Trait for ElevatedTrait { type ExistentialDeposit = T::ExistentialDeposit; type AccountStore = T::AccountStore; type WeightInfo = >::WeightInfo; + type MaxLocks = T::MaxLocks; } impl, I: Instance> Currency for Module where @@ -1286,6 +1301,8 @@ where { type Moment = T::BlockNumber; + type MaxLocks = T::MaxLocks; + // Set a lock on the balance of `who`. // Is a no-op if lock amount is zero or `reasons` `is_none()`. 
fn set_lock( diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 8e764112ba24ccf135148378be62f28bc8ab3428..0ee488d097294d9de8a1f9e3327a1c87b78e678c 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -87,7 +87,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = super::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -103,12 +103,14 @@ impl pallet_transaction_payment::Trait for Test { type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } + impl Trait for Test { type Balance = u64; type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = system::Module; + type MaxLocks = (); type WeightInfo = (); } diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index 86abc2b6044ce4f6802b794968140c25e3b72ba4..4efcdad8ca33467843a16a1f867acb1b4f0f7f2a 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -87,7 +87,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = super::AccountData; type OnNewAccount = (); type OnKilledAccount = Module; @@ -103,6 +103,9 @@ impl pallet_transaction_payment::Trait for Test { type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } +parameter_types! { + pub const MaxLocks: u32 = 50; +} impl Trait for Test { type Balance = u64; type DustRemoval = (); @@ -114,6 +117,7 @@ impl Trait for Test { system::CallKillAccount, u64, super::AccountData >; + type MaxLocks = MaxLocks; type WeightInfo = (); } diff --git a/frame/benchmark/Cargo.toml b/frame/benchmark/Cargo.toml deleted file mode 100644 index 43e131c2dc764d7870493ae3ca258db052706ce3..0000000000000000000000000000000000000000 --- a/frame/benchmark/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "pallet-benchmark" -version = "2.0.0-rc5" -authors = ["Parity Technologies "] -edition = "2018" -license = "Apache-2.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -description = "Patterns to benchmark in a FRAME runtime." 
- -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } - -[features] -default = ["std"] -std = [ - "serde", - "codec/std", - "sp-std/std", - "sp-io/std", - "sp-runtime/std", - "frame-support/std", - "frame-system/std", - "frame-benchmarking/std", -] -runtime-benchmarks = ["frame-benchmarking"] diff --git a/frame/benchmark/src/benchmarking.rs b/frame/benchmark/src/benchmarking.rs deleted file mode 100644 index ddf3df9eaad4cb5a367724a5b2b1508359856bc4..0000000000000000000000000000000000000000 --- a/frame/benchmark/src/benchmarking.rs +++ /dev/null @@ -1,134 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Benchmarks for common FRAME Pallet operations. - -#![cfg(feature = "runtime-benchmarks")] - -use super::*; - -use frame_system::RawOrigin; -use sp_std::prelude::*; -use frame_benchmarking::{benchmarks, account}; - -use crate::Module as Benchmark; - -const SEED: u32 = 0; - -benchmarks! { - _ { - let m in 1 .. 1000 => { - let origin = RawOrigin::Signed(account("member", m, SEED)); - Benchmark::::add_member_list(origin.into())? - }; - let i in 1 .. 1000 => { - MyMap::insert(i, i); - }; - let d in 1 .. 1000 => { - for i in 0..d { - for j in 0..100 { - MyDoubleMap::insert(i, j, d); - } - } - }; - } - - add_member_list { - let m in ...; - }: _(RawOrigin::Signed(account("member", m + 1, SEED))) - - append_member_list { - let m in ...; - }: _(RawOrigin::Signed(account("member", m + 1, SEED))) - - read_value { - let n in 1 .. 1000; - MyValue::put(n); - }: _(RawOrigin::Signed(account("user", 0, SEED)), n) - - put_value { - let n in 1 .. 1000; - }: _(RawOrigin::Signed(account("user", 0, SEED)), n) - - exists_value { - let n in 1 .. 1000; - MyValue::put(n); - }: _(RawOrigin::Signed(account("user", 0, SEED)), n) - - remove_value { - let i in ...; - }: _(RawOrigin::Signed(account("user", 0, SEED)), i) - - read_map { - let i in ...; - }: _(RawOrigin::Signed(account("user", 0, SEED)), i) - - insert_map { - let n in 1 .. 
1000; - }: _(RawOrigin::Signed(account("user", 0, SEED)), n) - - contains_key_map { - let i in ...; - }: _(RawOrigin::Signed(account("user", 0, SEED)), i) - - remove_prefix { - let d in ...; - }: _(RawOrigin::Signed(account("user", 0, SEED)), d) - - do_nothing { - let n in 1 .. 1000; - }: _(RawOrigin::Signed(account("user", 0, SEED)), n) - - encode_accounts { - let a in 1 .. 1000; - let mut accounts = Vec::new(); - for _ in 0..a { - accounts.push(account::("encode", a, SEED)); - } - }: _(RawOrigin::Signed(account("user", 0, SEED)), accounts) - - decode_accounts { - let a in 1 .. 1000; - let mut accounts = Vec::new(); - for _ in 0..a { - accounts.push(account::("encode", a, SEED)); - } - let bytes = accounts.encode(); - }: _(RawOrigin::Signed(account("user", 0, SEED)), bytes) - - // Custom implementation to handle benchmarking of storage recalculation. - // Puts `repeat` number of items into random storage keys, and then times how - // long it takes to recalculate the storage root. - storage_root { - let z in 0 .. 10000; - }: { - for index in 0 .. z { - let random = (index).using_encoded(sp_io::hashing::blake2_256); - sp_io::storage::set(&random, &random); - } - } - - // Custom implementation to handle benchmarking of calling a host function. - // Will check how long it takes to call `current_time()`. - current_time { - let z in 0 .. 1000; - }: { - for _ in 0 .. z { - let _ = frame_benchmarking::benchmarking::current_time(); - } - } -} diff --git a/frame/benchmark/src/lib.rs b/frame/benchmark/src/lib.rs deleted file mode 100644 index 422272f817c7a2f75c2b991a40a01929b4dc48b2..0000000000000000000000000000000000000000 --- a/frame/benchmark/src/lib.rs +++ /dev/null @@ -1,191 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A pallet that contains common runtime patterns in an isolated manner. -//! This pallet is **not** meant to be used in a production blockchain, just -//! for benchmarking and testing purposes. - -#![cfg_attr(not(feature = "std"), no_std)] - -use frame_support::{decl_module, decl_storage, decl_event, decl_error}; -use frame_support::traits::Currency; -use frame_system::{self as system, ensure_signed}; -use codec::{Encode, Decode}; -use sp_std::prelude::Vec; - -mod benchmarking; - -/// Type alias for currency balance. -pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; - -/// The pallet's configuration trait. -pub trait Trait: system::Trait { - type Event: From> + Into<::Event>; - type Currency: Currency; -} - -// This pallet's storage items. -decl_storage! 
{ - trait Store for Module as Benchmark { - MyMemberList: Vec; - MyMemberMap: map hasher(blake2_128_concat) T::AccountId => bool; - MyValue: u32; - MyMap: map hasher(twox_64_concat) u32 => u32; - MyDoubleMap: double_map hasher(twox_64_concat) u32, hasher(identity) u32 => u32; - } -} - -// The pallet's events -decl_event!( - pub enum Event where AccountId = ::AccountId { - Dummy(u32, AccountId), - } -); - -// The pallet's errors -decl_error! { - pub enum Error for Module { - } -} - -// The pallet's dispatchable functions. -decl_module! { - /// The module declaration. - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - - /// Do nothing. - #[weight = 0] - pub fn do_nothing(_origin, input: u32) { - if input > 0 { - return Ok(()); - } - } - - /// Read a value from storage value `repeat` number of times. - /// Note the first `get()` read here will pull from the underlying - /// storage database, however, the `repeat` calls will all pull from the - /// storage overlay cache. You must consider this when analyzing the - /// results of the benchmark. - #[weight = 0] - pub fn read_value(_origin, repeat: u32) { - for _ in 0..repeat { - MyValue::get(); - } - } - - /// Put a value into a storage value. - #[weight = 0] - pub fn put_value(_origin, repeat: u32) { - for r in 0..repeat { - MyValue::put(r); - } - } - - /// Read a value from storage `repeat` number of times. - /// Note the first `exists()` read here will pull from the underlying - /// storage database, however, the `repeat` calls will all pull from the - /// storage overlay cache. You must consider this when analyzing the - /// results of the benchmark. - #[weight = 0] - pub fn exists_value(_origin, repeat: u32) { - for _ in 0..repeat { - MyValue::exists(); - } - } - - /// Remove a value from storage `repeat` number of times. - #[weight = 0] - pub fn remove_value(_origin, repeat: u32) { - for r in 0..repeat { - MyMap::remove(r); - } - } - - /// Read a value from storage map `repeat` number of times. - #[weight = 0] - pub fn read_map(_origin, repeat: u32) { - for r in 0..repeat { - MyMap::get(r); - } - } - - /// Insert a value into a map. - #[weight = 0] - pub fn insert_map(_origin, repeat: u32) { - for r in 0..repeat { - MyMap::insert(r, r); - } - } - - /// Check is a map contains a value `repeat` number of times. - #[weight = 0] - pub fn contains_key_map(_origin, repeat: u32) { - for r in 0..repeat { - MyMap::contains_key(r); - } - } - - /// Read a value from storage `repeat` number of times. - #[weight = 0] - pub fn remove_prefix(_origin, repeat: u32) { - for r in 0..repeat { - MyDoubleMap::remove_prefix(r); - } - } - - /// Add user to the list. - #[weight = 0] - pub fn add_member_list(origin) { - let who = ensure_signed(origin)?; - MyMemberList::::mutate(|x| x.push(who)); - } - - /// Append user to the list. - #[weight = 0] - pub fn append_member_list(origin) { - let who = ensure_signed(origin)?; - MyMemberList::::append(who); - } - - /// Encode a vector of accounts to bytes. - #[weight = 0] - pub fn encode_accounts(_origin, accounts: Vec) { - let bytes = accounts.encode(); - - // In an attempt to tell the compiler not to optimize away this benchmark, we will use - // the result of encoding the accounts. - if bytes.is_empty() { - frame_support::print("You are encoding zero accounts."); - } - } - - /// Decode bytes into a vector of accounts. 
- #[weight = 0] - pub fn decode_accounts(_origin, bytes: Vec) { - let accounts: Vec = Decode::decode(&mut bytes.as_slice()).map_err(|_| "Could not decode")?; - - // In an attempt to tell the compiler not to optimize away this benchmark, we will use - // the result of decoding the bytes. - if accounts.is_empty() { - frame_support::print("You are decoding zero bytes."); - } - } - } -} diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 37dcd85b5985f61ed75fd9ca9d1d3fe9e2e7b687..924ffc8627abc09e08fc8682098c4c4e6cbb527c 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "frame-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Macro for benchmarking a FRAME runtime." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,13 +16,17 @@ targets = ["x86_64-unknown-linux-gnu"] linregress = "0.1" paste = "0.1" codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -sp-api = { version = "2.0.0-rc5", path = "../../primitives/api", default-features = false } -sp-runtime-interface = { version = "2.0.0-rc5", path = "../../primitives/runtime-interface", default-features = false } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime", default-features = false } -sp-std = { version = "2.0.0-rc5", path = "../../primitives/std", default-features = false } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-api = { version = "2.0.0", path = "../../primitives/api", default-features = false } +sp-runtime-interface = { version = "2.0.0", path = "../../primitives/runtime-interface", default-features = false } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime", default-features = false } +sp-std = { version = "2.0.0", path = "../../primitives/std", default-features = false } +sp-io = { version = "2.0.0", path = "../../primitives/io", default-features = false } +sp-storage = { version = "2.0.0", path = "../../primitives/storage", default-features = false } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } + +[dev-dependencies] +hex-literal = "0.3.1" [features] default = [ "std" ] diff --git a/frame/benchmarking/README.md b/frame/benchmarking/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bf4bf951aa2b201e331799c85b401e0ae0f643db --- /dev/null +++ b/frame/benchmarking/README.md @@ -0,0 +1,188 @@ +# Substrate Runtime Benchmarking Framework + +This crate contains a set of utilities that can be used to benchmark and weigh FRAME pallets that +you develop for your Substrate Runtime. + +## Overview + +Substrate's FRAME framework allows you to develop custom logic for your blockchain that can be +included in your runtime. This flexibility is key to help you design complex and interactive +pallets, but without accurate weights assigned to dispatchables, your blockchain may become +vulnerable to denial of service (DoS) attacks by malicious actors. 
The Substrate Runtime Benchmarking Framework is a tool you can use to mitigate DoS attacks against your blockchain network by benchmarking the computational resources required to execute different functions in the runtime, for example extrinsics, `on_initialize`, `verify_unsigned`, etc.

The general philosophy behind the benchmarking system is: if your node can know ahead of time how long it will take to execute an extrinsic, it can safely make decisions to include or exclude that extrinsic based on its available resources. By doing this, it can keep the block production and import process running smoothly.

To achieve this, we need to model how long it takes to run each function in the runtime by:

* Creating custom benchmarking logic that executes a specific code path of a function.
* Executing the benchmark in the Wasm execution environment, on a specific set of hardware, with a custom runtime configuration, etc.
* Executing the benchmark across controlled ranges of possible values that may affect the result of the benchmark (called "components").
* Executing the benchmark multiple times at each point in order to isolate and remove outliers.
* Using the results of the benchmark to create a linear model of the function across its components.

With this linear model, we are able to estimate ahead of time how long it takes to execute some logic, and thus make informed decisions without actually spending any significant resources at runtime.

Note that all extrinsics are assumed to be of linear complexity, which is why we are able to always fit them to a linear model. Quadratic or higher complexity functions are, in general, considered to be dangerous to the runtime, as their weight may explode as the runtime state or input becomes too complex.

The benchmarking framework comes with the following tools:

* [A set of macros](./src/lib.rs) (`benchmarks!`, `add_benchmark!`, etc.) to make it easy to write, test, and add runtime benchmarks.
* [A set of linear regression analysis functions](./src/analysis.rs) for processing benchmark data.
* [A CLI extension](../../utils/benchmarking-cli/) to make it easy to execute benchmarks on your node.

The end-to-end benchmarking pipeline is disabled by default when compiling a node. If you want to run benchmarks, you need to enable it by compiling with the Rust feature flag `runtime-benchmarks`. More details about this below.

### Weight

Substrate represents computational resources using a generic unit of measurement called "Weight". It defines 10^12 Weight as 1 second of computation on the physical machine used for benchmarking. This means that the weight of a function may change based on the specific hardware used to benchmark the runtime functions.

By modeling the expected weight of each runtime function, the blockchain is able to calculate how many transactions or system-level functions it will be able to execute within a certain period of time. Often, the limiting factor for a blockchain is the fixed block production time for the network.

Within FRAME, each dispatchable function must have a `#[weight]` annotation with a function that can return the expected weight for the worst-case execution of that function given its inputs. This benchmarking framework will result in a file that automatically generates those formulas for you, which you can then use in your pallet.
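As a rough sketch of how a generated weight formula is typically wired into a dispatchable (the `WeightInfo` trait shape and the `do_something` extrinsic here are illustrative, not part of this change):

```rust
use frame_support::{decl_module, dispatch::DispatchResult, weights::Weight};
use frame_system::ensure_signed;

/// Assumed shape of the trait emitted into the generated weights file.
pub trait WeightInfo {
	fn do_something() -> Weight;
}

pub trait Trait: frame_system::Trait {
	/// Weight information for this pallet's extrinsics.
	type WeightInfo: WeightInfo;
}

decl_module! {
	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
		/// The `#[weight]` annotation pulls the worst-case formula from the generated
		/// `WeightInfo` implementation instead of hard-coding a constant.
		#[weight = T::WeightInfo::do_something()]
		pub fn do_something(origin) -> DispatchResult {
			let _who = ensure_signed(origin)?;
			Ok(())
		}
	}
}
```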
## Writing Benchmarks

Writing a runtime benchmark is much like writing a unit test for your pallet. It needs to be carefully crafted to execute a certain logical path in your code. In tests you want to check for various success and failure conditions, but with benchmarks you specifically look for the **most computationally heavy** path, a.k.a. the "worst case scenario".

This means that if there are certain storage items or runtime state that may affect the complexity of the function, for example by triggering more iterations in a `for` loop, you must set up your benchmark to trigger this in order to get an accurate result.

It may be that there are multiple paths your function can go down, and it is not clear which one is the heaviest. In this case, you should just create a benchmark for each scenario! You may find that there are paths in your code where complexity may become unbounded depending on user input. This may be a hint that you should enforce sane boundaries for how a user can use your pallet. For example: limiting the number of elements in a vector, limiting the number of iterations in a `for` loop, etc.

Examples of end-to-end benchmarks can be found in the [pallets provided by Substrate](../), and the specific details on how to use the `benchmarks!` macro can be found in [its documentation](./src/lib.rs).

## Testing Benchmarks

You can test your benchmarks using the same test runtime that you created for your pallet's unit tests. By creating your benchmarks in the `benchmarks!` macro, it automatically generates test functions for you:

```rust
fn test_benchmark_[benchmark_name]::<T>() -> Result<(), &'static str>
```

Simply add these functions to a unit test and ensure that the result of the function is `Ok(())`.

> **Note:** If your test runtime and production runtime have different configurations, you may get different results when testing your benchmark and actually running it.

In general, a benchmark returning `Ok(())` is all you need to check for, since it signals that the executed extrinsic has completed successfully. However, you can optionally include a `verify` block with your benchmark, which can additionally verify any final conditions, such as the final state of your runtime.

These additional `verify` blocks will not affect the results of your final benchmarking process.

To run the tests, you need to enable the `runtime-benchmarks` feature flag. This may also mean you need to move into your node's binary folder. For example, with the Substrate repository, this is how you would test the Balances pallet's benchmarks:

```bash
cd bin/node/cli
cargo test -p pallet-balances --features runtime-benchmarks
```

## Adding Benchmarks

The benchmarks included with each pallet are not automatically added to your node. To actually execute these benchmarks, you need to implement the `frame_benchmarking::Benchmark` trait. You can see an example of how to do this in the [included Substrate node](../../bin/node/runtime/src/lib.rs).
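A trimmed sketch of what that implementation looks like inside the runtime's `impl_runtime_apis!` block follows; it assumes a `Runtime` and `Block` type and a `Balances` instance from `construct_runtime!`, and anything beyond the `dispatch_benchmark` signature and `add_benchmark!` usage shown elsewhere in this change is illustrative:

```rust
impl frame_benchmarking::Benchmark<Block> for Runtime {
	fn dispatch_benchmark(
		config: frame_benchmarking::BenchmarkConfig,
	) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
		use frame_benchmarking::{add_benchmark, BenchmarkBatch, TrackedStorageKey};

		// Storage keys to exclude from DB read/write tracking (left empty in this sketch).
		let whitelist: Vec<TrackedStorageKey> = vec![];

		let mut batches = Vec::<BenchmarkBatch>::new();
		let params = (&config, &whitelist);

		// One `add_benchmark!` line per pallet whose benchmarks should be runnable.
		add_benchmark!(params, batches, pallet_balances, Balances);

		if batches.is_empty() {
			return Err("Benchmark not found for this pallet.".into());
		}
		Ok(batches)
	}
}
```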
+ +Assuming there are already some benchmarks set up on your node, you just need to add another +instance of the `add_benchmark!` macro: + +```rust +/// configuration for running benchmarks +/// | name of your pallet's crate (as imported) +/// v v +add_benchmark!(params, batches, pallet_balances, Balances); +/// ^ ^ +/// where all benchmark results are saved | +/// the `struct` created for your pallet by `construct_runtime!` +``` + +Once you have done this, you will need to compile your node binary with the `runtime-benchmarks` +feature flag: + +```bash +cd bin/node/cli +cargo build --release --features runtime-benchmarks +``` + +## Running Benchmarks + +Finally, once you have a node binary with benchmarks enabled, you need to execute your various +benchmarks. + +You can get a list of the available benchmarks by running: + +```bash +./target/release/substrate benchmark --chain dev --pallet "*" --extrinsic "*" --repeat 0 +``` + +Then you can run a benchmark like so: + +```bash +./target/release/substrate benchmark \ + --chain dev \ # Configurable Chain Spec + --execution=wasm \ # Always test with Wasm + --wasm-execution=compiled \ # Always used `wasm-time` + --pallet pallet_balances \ # Select the pallet + --extrinsic transfer \ # Select the extrinsic + --steps 50 \ # Number of samples across component ranges + --repeat 20 \ # Number of times we repeat a benchmark + --output \ # Output benchmark results into a Rust file +``` + +This will output a file `pallet_name.rs` which implements the `WeightInfo` trait you should include +in your pallet. Each blockchain should generate their own benchmark file with their custom +implementation of the `WeightInfo` trait. This means that you will be able to use these modular +Substrate pallets while still keeping your network safe for your specific configuration and +requirements. + +To get a full list of available options when running benchmarks, run: + +```bash +./target/release/substrate benchmark --help +``` + +License: Apache-2.0 diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index 6963d84ee614e20f94906099244d439092d86358..dafb4a74b669fef3c90330acae1ae32e27d1712f 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Tools for analysing the benchmark results. +//! Tools for analyzing the benchmark results. use std::collections::BTreeMap; use linregress::{FormulaRegressionBuilder, RegressionDataBuilder, RegressionModel}; diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 7ef274f25b15725e5395fdb1231078dd4340cd4f..b189cdb6e705e9ed8dc7ce68421c50f271b404eb 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -32,6 +32,7 @@ pub use sp_io::storage::root as storage_root; pub use sp_runtime::traits::Zero; pub use frame_support; pub use paste; +pub use sp_storage::TrackedStorageKey; /// Construct pallet benchmarks for weighing dispatchables. /// @@ -157,7 +158,7 @@ pub use paste; /// } /// ``` /// -/// These `verify` blocks will not execute when running your actual benchmarks! +/// These `verify` blocks will not affect your benchmark results! /// /// You can construct benchmark tests like so: /// @@ -418,156 +419,220 @@ macro_rules! benchmarks_iter { #[doc(hidden)] macro_rules! benchmark_backend { // parsing arms - ( { $( $instance:ident )? 
} $name:ident { - $( $where_clause:tt )* - } { - $( $common:tt )* - } { - $( PRE { $( $pre_parsed:tt )* } )* - } { $eval:block } { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( $common:tt )* } + { $( PRE { $( $pre_parsed:tt )* } )* } + { $eval:block } + { let $pre_id:tt : $pre_ty:ty = $pre_ex:expr; $( $rest:tt )* - } $postcode:block) => { + } + $postcode:block + ) => { $crate::benchmark_backend! { - { $( $instance)? } $name { $( $where_clause )* } { $( $common )* } { + { $( $instance)? } + $name + { $( $where_clause )* } + { $( $common )* } + { $( PRE { $( $pre_parsed )* } )* PRE { $pre_id , $pre_ty , $pre_ex } - } { $eval } { $( $rest )* } $postcode + } + { $eval } + { $( $rest )* } + $postcode } }; - ( { $( $instance:ident )? } $name:ident { - $( $where_clause:tt )* - } { - $( $common:tt )* - } { - $( $parsed:tt )* - } { $eval:block } { - let $param:ident in ( $param_from:expr ) .. $param_to:expr => $param_instancer:expr; - $( $rest:tt )* - } $postcode:block) => { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( $common:tt )* } + { $( $parsed:tt )* } + { $eval:block } + { + let $param:ident in ( $param_from:expr ) .. $param_to:expr => $param_instancer:expr; + $( $rest:tt )* + } + $postcode:block + ) => { $crate::benchmark_backend! { - { $( $instance)? } $name { $( $where_clause )* } { $( $common )* } { + { $( $instance)? } + $name + { $( $where_clause )* } + { $( $common )* } + { $( $parsed )* PARAM { $param , $param_from , $param_to , $param_instancer } - } { $eval } { $( $rest )* } $postcode + } + { $eval } + { $( $rest )* } + $postcode } }; // mutation arm to look after defaulting to a common param - ( { $( $instance:ident )? } $name:ident { - $( $where_clause:tt )* - } { - $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* - } { - $( $parsed:tt )* - } { $eval:block } { - let $param:ident in ...; - $( $rest:tt )* - } $postcode:block) => { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } + { $( $parsed:tt )* } + { $eval:block } + { + let $param:ident in ...; + $( $rest:tt )* + } + $postcode:block + ) => { $crate::benchmark_backend! { - { $( $instance)? } $name { $( $where_clause )* } { - $( { $common , $common_from , $common_to , $common_instancer } )* - } { - $( $parsed )* - } { $eval } { + { $( $instance)? } + $name + { $( $where_clause )* } + { $( { $common , $common_from , $common_to , $common_instancer } )* } + { $( $parsed )* } + { $eval } + { let $param in ({ $( let $common = $common_from; )* $param }) .. ({ $( let $common = $common_to; )* $param }) => ({ $( let $common = || -> Result<(), &'static str> { $common_instancer ; Ok(()) }; )* $param()? }); $( $rest )* - } $postcode + } + $postcode } }; // mutation arm to look after defaulting only the range to common param - ( { $( $instance:ident )? } $name:ident { - $( $where_clause:tt )* - } { - $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* - } { - $( $parsed:tt )* - } { $eval:block } { - let $param:ident in _ .. _ => $param_instancer:expr ; - $( $rest:tt )* - } $postcode:block) => { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } + { $( $parsed:tt )* } + { $eval:block } + { + let $param:ident in _ .. 
_ => $param_instancer:expr ; + $( $rest:tt )* + } + $postcode:block + ) => { $crate::benchmark_backend! { - { $( $instance)? } $name { $( $where_clause )* } { - $( { $common , $common_from , $common_to , $common_instancer } )* - } { - $( $parsed )* - } { $eval } { + { $( $instance)? } + $name + { $( $where_clause )* } + { $( { $common , $common_from , $common_to , $common_instancer } )* } + { $( $parsed )* } + { $eval } + { let $param in ({ $( let $common = $common_from; )* $param }) .. ({ $( let $common = $common_to; )* $param }) => $param_instancer ; $( $rest )* - } $postcode + } + $postcode } }; // mutation arm to look after a single tt for param_from. - ( { $( $instance:ident )? } $name:ident { - $( $where_clause:tt )* - } { - $( $common:tt )* - } { - $( $parsed:tt )* - } { $eval:block } { - let $param:ident in $param_from:tt .. $param_to:expr => $param_instancer:expr ; - $( $rest:tt )* - } $postcode:block) => { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( $common:tt )* } + { $( $parsed:tt )* } + { $eval:block } + { + let $param:ident in $param_from:tt .. $param_to:expr => $param_instancer:expr ; + $( $rest:tt )* + } + $postcode:block + ) => { $crate::benchmark_backend! { { $( $instance)? } - $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* } { $eval } { + $name + { $( $where_clause )* } + { $( $common )* } + { $( $parsed )* } + { $eval } + { let $param in ( $param_from ) .. $param_to => $param_instancer; $( $rest )* - } $postcode + } + $postcode } }; // mutation arm to look after the default tail of `=> ()` - ( { $( $instance:ident )? } $name:ident { - $( $where_clause:tt )* - } { - $( $common:tt )* - } { - $( $parsed:tt )* - } { $eval:block } { - let $param:ident in $param_from:tt .. $param_to:expr; - $( $rest:tt )* - } $postcode:block) => { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( $common:tt )* } + { $( $parsed:tt )* } + { $eval:block } + { + let $param:ident in $param_from:tt .. $param_to:expr; + $( $rest:tt )* + } + $postcode:block + ) => { $crate::benchmark_backend! { { $( $instance)? } - $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* } { $eval } { + $name + { $( $where_clause )* } + { $( $common )* } + { $( $parsed )* } + { $eval } + { let $param in $param_from .. $param_to => (); $( $rest )* - } $postcode + } + $postcode } }; // mutation arm to look after `let _ =` - ( { $( $instance:ident )? } $name:ident { - $( $where_clause:tt )* - } { - $( $common:tt )* - } { - $( $parsed:tt )* - } { $eval:block } { - let $pre_id:tt = $pre_ex:expr; - $( $rest:tt )* - } $postcode:block) => { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( $common:tt )* } + { $( $parsed:tt )* } + { $eval:block } + { + let $pre_id:tt = $pre_ex:expr; + $( $rest:tt )* + } + $postcode:block + ) => { $crate::benchmark_backend! { { $( $instance)? } - $name { $( $where_clause )* } { $( $common )* } { $( $parsed )* } { $eval } { + $name + { $( $where_clause )* } + { $( $common )* } + { $( $parsed )* } + { $eval } + { let $pre_id : _ = $pre_ex; $( $rest )* - } $postcode + } + $postcode } }; // actioning arm - ( { $( $instance:ident )? 
} $name:ident { - $( $where_clause:tt )* - } { - $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* - } { - $( PRE { $pre_id:tt , $pre_ty:ty , $pre_ex:expr } )* - $( PARAM { $param:ident , $param_from:expr , $param_to:expr , $param_instancer:expr } )* - } { $eval:block } { $( $post:tt )* } $postcode:block) => { + ( + { $( $instance:ident )? } + $name:ident + { $( $where_clause:tt )* } + { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } + { + $( PRE { $pre_id:tt , $pre_ty:ty , $pre_ex:expr } )* + $( PARAM { $param:ident , $param_from:expr , $param_to:expr , $param_instancer:expr } )* + } + { $eval:block } + { $( $post:tt )* } + $postcode:block + ) => { #[allow(non_camel_case_types)] struct $name; #[allow(unused_variables)] @@ -583,30 +648,11 @@ macro_rules! benchmark_backend { ] } - fn instance(&self, components: &[($crate::BenchmarkParameter, u32)]) - -> Result Result<(), &'static str>>, &'static str> - { - $( - let $common = $common_from; - )* - $( - // Prepare instance - let $param = components.iter() - .find(|&c| c.0 == $crate::BenchmarkParameter::$param) - .unwrap().1; - )* - $( - let $pre_id : $pre_ty = $pre_ex; - )* - $( $param_instancer ; )* - $( $post )* - - Ok(Box::new(move || -> Result<(), &'static str> { $eval; Ok(()) })) - } - - fn verify(&self, components: &[($crate::BenchmarkParameter, u32)]) - -> Result Result<(), &'static str>>, &'static str> - { + fn instance( + &self, + components: &[($crate::BenchmarkParameter, u32)], + verify: bool + ) -> Result Result<(), &'static str>>, &'static str> { $( let $common = $common_from; )* @@ -614,7 +660,8 @@ macro_rules! benchmark_backend { // Prepare instance let $param = components.iter() .find(|&c| c.0 == $crate::BenchmarkParameter::$param) - .unwrap().1; + .ok_or("Could not find component in benchmark preparation.")? + .1; )* $( let $pre_id : $pre_ty = $pre_ex; @@ -622,7 +669,13 @@ macro_rules! benchmark_backend { $( $param_instancer ; )* $( $post )* - Ok(Box::new(move || -> Result<(), &'static str> { $eval; $postcode; Ok(()) })) + Ok(Box::new(move || -> Result<(), &'static str> { + $eval; + if verify { + $postcode; + } + Ok(()) + })) } } }; @@ -671,26 +724,16 @@ macro_rules! selected_benchmark { } } - fn instance(&self, components: &[($crate::BenchmarkParameter, u32)]) - -> Result Result<(), &'static str>>, &'static str> - { + fn instance( + &self, + components: &[($crate::BenchmarkParameter, u32)], + verify: bool + ) -> Result Result<(), &'static str>>, &'static str> { match self { $( Self::$bench => < $bench as $crate::BenchmarkingSetup - >::instance(&$bench, components), - )* - } - } - - fn verify(&self, components: &[($crate::BenchmarkParameter, u32)]) - -> Result Result<(), &'static str>>, &'static str> - { - match self { - $( - Self::$bench => < - $bench as $crate::BenchmarkingSetup - >::verify(&$bench, components), + >::instance(&$bench, components, verify), )* } } @@ -726,7 +769,8 @@ macro_rules! impl_benchmark { highest_range_values: &[u32], steps: &[u32], repeat: u32, - whitelist: &[Vec] + whitelist: &[$crate::TrackedStorageKey], + verify: bool, ) -> Result, &'static str> { // Map the input to the selected benchmark. let extrinsic = sp_std::str::from_utf8(extrinsic) @@ -735,9 +779,19 @@ macro_rules! 
impl_benchmark { $( stringify!($name) => SelectedBenchmark::$name, )* _ => return Err("Could not find extrinsic."), }; + let mut results: Vec<$crate::BenchmarkResults> = Vec::new(); + if repeat == 0 { + return Ok(results); + } - // Add whitelist to DB - $crate::benchmarking::set_whitelist(whitelist.to_vec()); + // Add whitelist to DB including whitelisted caller + let mut whitelist = whitelist.to_vec(); + let whitelisted_caller_key = + as frame_support::storage::StorageMap<_,_>>::hashed_key_for( + $crate::whitelisted_caller::() + ); + whitelist.push(whitelisted_caller_key.into()); + $crate::benchmarking::set_whitelist(whitelist); // Warm up the DB $crate::benchmarking::commit_db(); @@ -746,15 +800,15 @@ macro_rules! impl_benchmark { let components = < SelectedBenchmark as $crate::BenchmarkingSetup >::components(&selected_benchmark); - let mut results: Vec<$crate::BenchmarkResults> = Vec::new(); // Default number of steps for a component. let mut prev_steps = 10; let repeat_benchmark = | repeat: u32, - c: Vec<($crate::BenchmarkParameter, u32)>, + c: &[($crate::BenchmarkParameter, u32)], results: &mut Vec<$crate::BenchmarkResults>, + verify: bool, | -> Result<(), &'static str> { // Run the benchmark `repeat` times. for _ in 0..repeat { @@ -762,7 +816,7 @@ macro_rules! impl_benchmark { // benchmark. let closure_to_benchmark = < SelectedBenchmark as $crate::BenchmarkingSetup - >::instance(&selected_benchmark, &c)?; + >::instance(&selected_benchmark, c, verify)?; // Set the block number to at least 1 so events are deposited. if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { @@ -776,43 +830,49 @@ macro_rules! impl_benchmark { // Reset the read/write counter so we don't count operations in the setup process. $crate::benchmarking::reset_read_write_count(); - // Time the extrinsic logic. - frame_support::debug::trace!( - target: "benchmark", - "Start Benchmark: {:?}", c - ); - - let start_extrinsic = $crate::benchmarking::current_time(); - closure_to_benchmark()?; - let finish_extrinsic = $crate::benchmarking::current_time(); - let elapsed_extrinsic = finish_extrinsic - start_extrinsic; - // Commit the changes to get proper write count - $crate::benchmarking::commit_db(); - frame_support::debug::trace!( - target: "benchmark", - "End Benchmark: {} ns", elapsed_extrinsic - ); - let read_write_count = $crate::benchmarking::read_write_count(); - frame_support::debug::trace!( - target: "benchmark", - "Read/Write Count {:?}", read_write_count - ); - - // Time the storage root recalculation. - let start_storage_root = $crate::benchmarking::current_time(); - $crate::storage_root(); - let finish_storage_root = $crate::benchmarking::current_time(); - let elapsed_storage_root = finish_storage_root - start_storage_root; - - results.push($crate::BenchmarkResults { - components: c.clone(), - extrinsic_time: elapsed_extrinsic, - storage_root_time: elapsed_storage_root, - reads: read_write_count.0, - repeat_reads: read_write_count.1, - writes: read_write_count.2, - repeat_writes: read_write_count.3, - }); + if verify { + closure_to_benchmark()?; + } else { + // Time the extrinsic logic. 
+ frame_support::debug::trace!( + target: "benchmark", + "Start Benchmark: {:?}", c + ); + + let start_extrinsic = $crate::benchmarking::current_time(); + + closure_to_benchmark()?; + + let finish_extrinsic = $crate::benchmarking::current_time(); + let elapsed_extrinsic = finish_extrinsic - start_extrinsic; + // Commit the changes to get proper write count + $crate::benchmarking::commit_db(); + frame_support::debug::trace!( + target: "benchmark", + "End Benchmark: {} ns", elapsed_extrinsic + ); + let read_write_count = $crate::benchmarking::read_write_count(); + frame_support::debug::trace!( + target: "benchmark", + "Read/Write Count {:?}", read_write_count + ); + + // Time the storage root recalculation. + let start_storage_root = $crate::benchmarking::current_time(); + $crate::storage_root(); + let finish_storage_root = $crate::benchmarking::current_time(); + let elapsed_storage_root = finish_storage_root - start_storage_root; + + results.push($crate::BenchmarkResults { + components: c.to_vec(), + extrinsic_time: elapsed_extrinsic, + storage_root_time: elapsed_storage_root, + reads: read_write_count.0, + repeat_reads: read_write_count.1, + writes: read_write_count.2, + repeat_writes: read_write_count.3, + }); + } // Wipe the DB back to the genesis state. $crate::benchmarking::wipe_db(); @@ -822,7 +882,11 @@ macro_rules! impl_benchmark { }; if components.is_empty() { - repeat_benchmark(repeat, Default::default(), &mut results)?; + if verify { + // If `--verify` is used, run the benchmark once to verify it would complete. + repeat_benchmark(1, Default::default(), &mut Vec::new(), true)?; + } + repeat_benchmark(repeat, Default::default(), &mut results, false)?; } else { // Select the component we will be benchmarking. Each component will be benchmarked. for (idx, (name, low, high)) in components.iter().enumerate() { @@ -858,7 +922,11 @@ macro_rules! impl_benchmark { ) .collect(); - repeat_benchmark(repeat, c, &mut results)?; + if verify { + // If `--verify` is used, run the benchmark once to verify it would complete. + repeat_benchmark(1, &c, &mut Vec::new(), true)?; + } + repeat_benchmark(repeat, &c, &mut results, false)?; } } } @@ -891,17 +959,17 @@ macro_rules! impl_benchmark_test { let execute_benchmark = | c: Vec<($crate::BenchmarkParameter, u32)> | -> Result<(), &'static str> { - // Set up the verification state + // Set up the benchmark, return execution + verification function. let closure_to_verify = < SelectedBenchmark as $crate::BenchmarkingSetup - >::verify(&selected_benchmark, &c)?; + >::instance(&selected_benchmark, &c, true)?; // Set the block number to at least 1 so events are deposited. if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { frame_system::Module::::set_block_number(1.into()); } - // Run verification + // Run execution + verification closure_to_verify()?; // Reset the state @@ -944,22 +1012,28 @@ macro_rules! impl_benchmark_test { /// First create an object that holds in the input parameters for the benchmark: /// /// ```ignore -/// let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat, &whitelist); +/// let params = (&config, &whitelist); /// ``` /// -/// The `whitelist` is a `Vec>` of storage keys that you would like to skip for DB tracking. For example: +/// The `whitelist` is a parameter you pass to control the DB read/write tracking. +/// We use a vector of [TrackedStorageKey](./struct.TrackedStorageKey.html), which is a simple struct used to set +/// if a key has been read or written to. 
/// -/// ```ignore -/// let whitelist: Vec> = vec![ +/// For values that should be skipped entirely, we can just pass `key.into()`. For example: +/// +/// ``` +/// use frame_benchmarking::TrackedStorageKey; +/// let whitelist: Vec = vec![ /// // Block Number -/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec(), +/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), /// // Total Issuance -/// hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec(), +/// hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), /// // Execution Phase -/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec(), +/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), /// // Event Count -/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec(), +/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), /// ]; +/// ``` /// /// Then define a mutable local variable to hold your `BenchmarkBatch` object: /// @@ -982,18 +1056,29 @@ macro_rules! impl_benchmark_test { macro_rules! add_benchmark { ( $params:ident, $batches:ident, $name:ident, $( $location:tt )* ) => ( let name_string = stringify!($name).as_bytes(); - let (pallet, benchmark, lowest_range_values, highest_range_values, steps, repeat, whitelist, extra) = $params; + let (config, whitelist) = $params; + let $crate::BenchmarkConfig { + pallet, + benchmark, + lowest_range_values, + highest_range_values, + steps, + repeat, + verify, + extra, + } = config; if &pallet[..] == &name_string[..] || &pallet[..] == &b"*"[..] { if &pallet[..] == &b"*"[..] || &benchmark[..] == &b"*"[..] { - for benchmark in $( $location )*::benchmarks(extra).into_iter() { + for benchmark in $( $location )*::benchmarks(*extra).into_iter() { $batches.push($crate::BenchmarkBatch { results: $( $location )*::run_benchmark( benchmark, &lowest_range_values[..], &highest_range_values[..], &steps[..], - repeat, + *repeat, whitelist, + *verify, )?, pallet: name_string.to_vec(), benchmark: benchmark.to_vec(), @@ -1006,8 +1091,9 @@ macro_rules! 
add_benchmark { &lowest_range_values[..], &highest_range_values[..], &steps[..], - repeat, + *repeat, whitelist, + *verify, )?, pallet: name_string.to_vec(), benchmark: benchmark.clone(), diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 6a4dc7eee4ee7e2c0d032805770ad67bfc89c17a..0429d98e18618dab5294b34dca56b8a5141a1ce1 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -20,7 +20,6 @@ #![cfg(test)] use super::*; -use codec::Decode; use sp_std::prelude::*; use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::{H256, Header}}; use frame_support::{ @@ -64,12 +63,10 @@ pub trait OtherTrait { type OtherEvent; } -pub trait Trait: OtherTrait where Self::OtherEvent: Into { +pub trait Trait: frame_system::Trait + OtherTrait + where Self::OtherEvent: Into<::Event> +{ type Event; - type BlockNumber; - type AccountId: 'static + Default + Decode; - type Origin: From> + - Into, Self::Origin>>; } #[derive(Clone, Eq, PartialEq)] @@ -96,7 +93,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = (); type AvailableBlockRatio = (); type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); @@ -105,9 +102,6 @@ impl frame_system::Trait for Test { impl Trait for Test { type Event = (); - type BlockNumber = u32; - type Origin = Origin; - type AccountId = u64; } impl OtherTrait for Test { @@ -182,10 +176,11 @@ fn benchmarks_macro_works() { let closure = >::instance( &selected, &[(BenchmarkParameter::b, 1)], + true, ).expect("failed to create closure"); new_test_ext().execute_with(|| { - assert_eq!(closure(), Ok(())); + assert_ok!(closure()); }); } @@ -199,6 +194,7 @@ fn benchmarks_macro_rename_works() { let closure = >::instance( &selected, &[(BenchmarkParameter::b, 1)], + true, ).expect("failed to create closure"); new_test_ext().execute_with(|| { @@ -216,9 +212,10 @@ fn benchmarks_macro_works_for_non_dispatchable() { let closure = >::instance( &selected, &[(BenchmarkParameter::x, 1)], + true, ).expect("failed to create closure"); - assert_eq!(closure(), Ok(())); + assert_ok!(closure()); } #[test] @@ -226,14 +223,28 @@ fn benchmarks_macro_verify_works() { // Check postcondition for benchmark `set_value` is valid. let selected = SelectedBenchmark::set_value; - let closure = >::verify( + let closure = >::instance( &selected, &[(BenchmarkParameter::b, 1)], + true, ).expect("failed to create closure"); new_test_ext().execute_with(|| { assert_ok!(closure()); }); + + // Check postcondition for benchmark `bad_verify` is invalid. + let selected = SelectedBenchmark::bad_verify; + + let closure = >::instance( + &selected, + &[(BenchmarkParameter::x, 10000)], + true, + ).expect("failed to create closure"); + + new_test_ext().execute_with(|| { + assert_err!(closure(), "You forgot to sort!"); + }); } #[test] diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 5a2bd55ff79aa15b51ad9a7511e73d2f5051d3c0..347334e24d5f6d5904693604fa4a609d8924a7e8 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -21,6 +21,7 @@ use codec::{Encode, Decode}; use sp_std::{vec::Vec, prelude::Box}; use sp_io::hashing::blake2_256; use sp_runtime::RuntimeString; +use sp_storage::TrackedStorageKey; /// An alphabet of possible parameters to use for benchmarking. 
#[derive(Encode, Decode, Clone, Copy, PartialEq, Debug)] @@ -62,19 +63,32 @@ pub struct BenchmarkResults { pub repeat_writes: u32, } +/// Configuration used to setup and run runtime benchmarks. +#[derive(Encode, Decode, Default, Clone, PartialEq, Debug)] +pub struct BenchmarkConfig { + /// The encoded name of the pallet to benchmark. + pub pallet: Vec, + /// The encoded name of the benchmark/extrinsic to run. + pub benchmark: Vec, + /// An optional manual override to the lowest values used in the `steps` range. + pub lowest_range_values: Vec, + /// An optional manual override to the highest values used in the `steps` range. + pub highest_range_values: Vec, + /// The number of samples to take across the range of values for components. + pub steps: Vec, + /// The number of times to repeat a benchmark. + pub repeat: u32, + /// Enable an extra benchmark iteration which runs the verification logic for a benchmark. + pub verify: bool, + /// Enable benchmarking of "extra" extrinsics, i.e. those that are not directly used in a pallet. + pub extra: bool, +} + sp_api::decl_runtime_apis! { /// Runtime api for benchmarking a FRAME runtime. pub trait Benchmark { /// Dispatch the given benchmark. - fn dispatch_benchmark( - pallet: Vec, - benchmark: Vec, - lowest_range_values: Vec, - highest_range_values: Vec, - steps: Vec, - repeat: u32, - extra: bool, - ) -> Result, RuntimeString>; + fn dispatch_benchmark(config: BenchmarkConfig) -> Result, RuntimeString>; } } @@ -101,19 +115,52 @@ pub trait Benchmarking { self.commit() } - /// Get the read/write count + /// Get the read/write count. fn read_write_count(&self) -> (u32, u32, u32, u32) { self.read_write_count() } - /// Reset the read/write count + /// Reset the read/write count. fn reset_read_write_count(&mut self) { self.reset_read_write_count() } - fn set_whitelist(&mut self, new: Vec>) { + /// Get the DB whitelist. + fn get_whitelist(&self) -> Vec { + self.get_whitelist() + } + + /// Set the DB whitelist. + fn set_whitelist(&mut self, new: Vec) { self.set_whitelist(new) } + + // Add a new item to the DB whitelist. + fn add_to_whitelist(&mut self, add: TrackedStorageKey) { + let mut whitelist = self.get_whitelist(); + match whitelist.iter_mut().find(|x| x.key == add.key) { + // If we already have this key in the whitelist, update to be the most constrained value. + Some(item) => { + *item = TrackedStorageKey { + key: add.key, + has_been_read: item.has_been_read || add.has_been_read, + has_been_written: item.has_been_written || add.has_been_written, + } + }, + // If the key does not exist, add it. + None => { + whitelist.push(add); + } + } + self.set_whitelist(whitelist); + } + + // Remove an item from the DB whitelist. + fn remove_from_whitelist(&mut self, remove: Vec) { + let mut whitelist = self.get_whitelist(); + whitelist.retain(|x| x.key != remove); + self.set_whitelist(whitelist); + } } /// The pallet benchmarking trait. @@ -141,7 +188,8 @@ pub trait Benchmarking { highest_range_values: &[u32], steps: &[u32], repeat: u32, - whitelist: &[Vec] + whitelist: &[TrackedStorageKey], + verify: bool, ) -> Result, &'static str>; } @@ -154,10 +202,11 @@ pub trait BenchmarkingSetup { fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)>; /// Set up the storage, and prepare a closure to run the benchmark. 
- fn instance(&self, components: &[(BenchmarkParameter, u32)]) -> Result Result<(), &'static str>>, &'static str>; - - /// Set up the storage, and prepare a closure to test and verify the benchmark - fn verify(&self, components: &[(BenchmarkParameter, u32)]) -> Result Result<(), &'static str>>, &'static str>; + fn instance( + &self, + components: &[(BenchmarkParameter, u32)], + verify: bool + ) -> Result Result<(), &'static str>>, &'static str>; } /// Grab an account, seeded by a name and index. @@ -165,3 +214,8 @@ pub fn account(name: &'static str, index: u32, seed let entropy = (name, index, seed).using_encoded(blake2_256); AccountId::decode(&mut &entropy[..]).unwrap_or_default() } + +/// This caller account is automatically whitelisted for DB reads/writes by the benchmarking macro. +pub fn whitelisted_caller() -> AccountId { + account::("whitelisted_caller", 0, 0) +} diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index b1db6fe6b97c8469d82f10f611e9a5cffc838e34..fd302fb836579a6fe17634407628ddea39baca7c 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-collective" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Collective system: Members of a set of account IDs can make their collective feelings known through dispatched calls from one of two specialized origins." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,17 +15,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } [dev-dependencies] -hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +hex-literal = "0.3.1" +pallet-balances = { version = "2.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/collective/README.md b/frame/collective/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..f62df65f728cdcd2687d8cc89ac03f2324eac833 --- /dev/null +++ b/frame/collective/README.md @@ -0,0 +1,25 @@ +Collective system: Members of a set of account IDs can make their collective feelings known +through dispatched calls from one of two specialized origins. + +The membership can be provided in one of two ways: either directly, using the Root-dispatchable +function `set_members`, or indirectly, through implementing the `ChangeMembers` trait. +The pallet assumes that the amount of members stays at or below `MaxMembers` for its weight +calculations, but enforces this neither in `set_members` nor in `change_members_sorted`. + +A "prime" member may be set to help determine the default vote behavior based on chain +config. If `PrimeDefaultVote` is used, the prime vote acts as the default vote in case of any +abstentions after the voting period. If `MoreThanMajorityThenPrimeDefaultVote` is used, then +abstentions will first follow the majority of the collective voting, and then the prime +member. + +Voting happens through motions comprising a proposal (i.e. a curried dispatchable) plus a +number of approvals required for it to pass and be called. Motions are open for members to +vote on for a minimum period given by `MotionDuration`. As soon as the needed number of +approvals is given, the motion is closed and executed. If the number of approvals is not reached +during the voting period, then `close` may be called by any account in order to force the end +of the motion explicitly. If a prime member is defined then their vote is used in place of any +abstentions and the proposal is executed if there are enough approvals counting the new votes. + +If there are not, or if no prime is set, then the motion is dropped without being executed. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index b9558d8c8ce1c54badba18e5de6558f13591d54d..d4e80d515941fea3b02d588177f3f0108c71195e 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use frame_system::RawOrigin as SystemOrigin; use frame_system::EventRecord; -use frame_benchmarking::{benchmarks_instance, account}; +use frame_benchmarking::{benchmarks_instance, account, whitelisted_caller}; use sp_runtime::traits::Bounded; use sp_std::mem::size_of; @@ -45,8 +45,8 @@ benchmarks_instance! { _{ } set_members { - let m in 1 .. MAX_MEMBERS; - let n in 1 .. MAX_MEMBERS; + let m in 1 .. T::MaxMembers::get(); + let n in 1 .. T::MaxMembers::get(); let p in 1 .. T::MaxProposals::get(); // Set old members. @@ -63,7 +63,7 @@ benchmarks_instance! { SystemOrigin::Root.into(), old_members.clone(), Some(last_old_member.clone()), - MAX_MEMBERS, + T::MaxMembers::get(), )?; // Set a high threshold for proposals passing so that they stay around. @@ -104,15 +104,15 @@ benchmarks_instance! { new_members.push(last_member.clone()); } - }: _(SystemOrigin::Root, new_members.clone(), Some(last_member), MAX_MEMBERS) + }: _(SystemOrigin::Root, new_members.clone(), Some(last_member), T::MaxMembers::get()) verify { new_members.sort(); assert_eq!(Collective::::members(), new_members); } execute { - let m in 1 .. MAX_MEMBERS; let b in 1 .. MAX_BYTES; + let m in 1 .. T::MaxMembers::get(); let bytes_in_storage = b + size_of::() as u32; @@ -123,10 +123,10 @@ benchmarks_instance!
{ members.push(member); } - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members, None, MAX_MEMBERS)?; + Collective::::set_members(SystemOrigin::Root.into(), members, None, T::MaxMembers::get())?; let proposal: T::Proposal = SystemCall::::remark(vec![1; b as usize]).into(); @@ -141,8 +141,8 @@ benchmarks_instance! { // This tests when execution would happen immediately after proposal propose_execute { - let m in 1 .. MAX_MEMBERS; let b in 1 .. MAX_BYTES; + let m in 1 .. T::MaxMembers::get(); let bytes_in_storage = b + size_of::() as u32; @@ -153,10 +153,10 @@ benchmarks_instance! { members.push(member); } - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members, None, MAX_MEMBERS)?; + Collective::::set_members(SystemOrigin::Root.into(), members, None, T::MaxMembers::get())?; let proposal: T::Proposal = SystemCall::::remark(vec![1; b as usize]).into(); let threshold = 1; @@ -172,9 +172,9 @@ benchmarks_instance! { // This tests when proposal is created and queued as "proposed" propose_proposed { - let m in 2 .. MAX_MEMBERS; - let p in 1 .. T::MaxProposals::get(); let b in 1 .. MAX_BYTES; + let m in 2 .. T::MaxMembers::get(); + let p in 1 .. T::MaxProposals::get(); let bytes_in_storage = b + size_of::() as u32; @@ -184,9 +184,9 @@ benchmarks_instance! { let member = account("member", i, SEED); members.push(member); } - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members, None, MAX_MEMBERS)?; + Collective::::set_members(SystemOrigin::Root.into(), members, None, T::MaxMembers::get())?; let threshold = m; // Add previous proposals. @@ -215,7 +215,7 @@ benchmarks_instance! { vote { // We choose 5 as a minimum so we always trigger a vote in the voting loop (`for j in ...`) - let m in 5 .. MAX_MEMBERS; + let m in 5 .. T::MaxMembers::get(); let p = T::MaxProposals::get(); let b = MAX_BYTES; @@ -231,7 +231,7 @@ benchmarks_instance! { } let voter: T::AccountId = account("voter", 0, SEED); members.push(voter.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, MAX_MEMBERS)?; + Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, T::MaxMembers::get())?; // Threshold is 1 less than the number of members so that one person can vote nay let threshold = m - 1; @@ -277,6 +277,9 @@ benchmarks_instance! { // Voter switches vote to nay, but does not kill the vote, just updates + inserts let approve = false; + // Whitelist voter account from further DB operations. + let voter_key = frame_system::Account::::hashed_key_for(&voter); + frame_benchmarking::benchmarking::add_to_whitelist(voter_key.into()); }: _(SystemOrigin::Signed(voter), last_hash.clone(), index, approve) verify { // All proposals exist and the last proposal has just been updated. @@ -288,11 +291,11 @@ benchmarks_instance! { close_early_disapproved { // We choose 4 as a minimum so we always trigger a vote in the voting loop (`for j in ...`) - let m in 4 .. MAX_MEMBERS; + let m in 4 .. T::MaxMembers::get(); let p in 1 .. T::MaxProposals::get(); - let b in 1 .. 
MAX_BYTES; - let bytes_in_storage = b + size_of::() as u32; + let bytes = 100; + let bytes_in_storage = bytes + size_of::() as u32; // Construct `members`. let mut members = vec![]; @@ -304,7 +307,7 @@ benchmarks_instance! { } let voter: T::AccountId = account("voter", 0, SEED); members.push(voter.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, MAX_MEMBERS)?; + Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, T::MaxMembers::get())?; // Threshold is total members so that one nay will disapprove the vote let threshold = m; @@ -313,7 +316,7 @@ benchmarks_instance! { let mut last_hash = T::Hash::default(); for i in 0 .. p { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark(vec![i as u8; b as usize]).into(); + let proposal: T::Proposal = SystemCall::::remark(vec![i as u8; bytes as usize]).into(); Collective::::propose( SystemOrigin::Signed(proposer.clone()).into(), threshold, @@ -356,6 +359,9 @@ benchmarks_instance! { approve, )?; + // Whitelist voter account from further DB operations. + let voter_key = frame_system::Account::::hashed_key_for(&voter); + frame_benchmarking::benchmarking::add_to_whitelist(voter_key.into()); }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::max_value(), bytes_in_storage) verify { // The last proposal is removed. @@ -364,10 +370,10 @@ benchmarks_instance! { } close_early_approved { + let b in 1 .. MAX_BYTES; // We choose 4 as a minimum so we always trigger a vote in the voting loop (`for j in ...`) - let m in 4 .. MAX_MEMBERS; + let m in 4 .. T::MaxMembers::get(); let p in 1 .. T::MaxProposals::get(); - let b in 1 .. MAX_BYTES; let bytes_in_storage = b + size_of::() as u32; @@ -377,9 +383,9 @@ benchmarks_instance! { let member = account("member", i, SEED); members.push(member); } - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, MAX_MEMBERS)?; + Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, T::MaxMembers::get())?; // Threshold is 2 so any two ayes will approve the vote let threshold = 2; @@ -446,11 +452,11 @@ benchmarks_instance! { close_disapproved { // We choose 4 as a minimum so we always trigger a vote in the voting loop (`for j in ...`) - let m in 4 .. MAX_MEMBERS; + let m in 4 .. T::MaxMembers::get(); let p in 1 .. T::MaxProposals::get(); - let b in 1 .. MAX_BYTES; - let bytes_in_storage = b + size_of::() as u32; + let bytes = 100; + let bytes_in_storage = bytes + size_of::() as u32; // Construct `members`. let mut members = vec![]; @@ -458,13 +464,13 @@ benchmarks_instance! { let member = account("member", i, SEED); members.push(member); } - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); Collective::::set_members( SystemOrigin::Root.into(), members.clone(), Some(caller.clone()), - MAX_MEMBERS, + T::MaxMembers::get(), )?; // Threshold is one less than total members so that two nays will disapprove the vote @@ -474,7 +480,7 @@ benchmarks_instance! { let mut last_hash = T::Hash::default(); for i in 0 .. 
p { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark(vec![i as u8; b as usize]).into(); + let proposal: T::Proposal = SystemCall::::remark(vec![i as u8; bytes as usize]).into(); Collective::::propose( SystemOrigin::Signed(caller.clone()).into(), threshold, @@ -517,10 +523,10 @@ benchmarks_instance! { } close_approved { + let b in 1 .. MAX_BYTES; // We choose 4 as a minimum so we always trigger a vote in the voting loop (`for j in ...`) - let m in 4 .. MAX_MEMBERS; + let m in 4 .. T::MaxMembers::get(); let p in 1 .. T::MaxProposals::get(); - let b in 1 .. MAX_BYTES; let bytes_in_storage = b + size_of::() as u32; @@ -530,13 +536,13 @@ benchmarks_instance! { let member = account("member", i, SEED); members.push(member); } - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); Collective::::set_members( SystemOrigin::Root.into(), members.clone(), Some(caller.clone()), - MAX_MEMBERS, + T::MaxMembers::get(), )?; // Threshold is two, so any two ayes will pass the vote @@ -579,6 +585,54 @@ benchmarks_instance! { assert_eq!(Collective::::proposals().len(), (p - 1) as usize); assert_last_event::(RawEvent::Executed(last_hash, Err(DispatchError::BadOrigin)).into()); } + + disapprove_proposal { + let p in 1 .. T::MaxProposals::get(); + + let m = 3; + let b = MAX_BYTES; + let bytes_in_storage = b + size_of::() as u32; + + // Construct `members`. + let mut members = vec![]; + for i in 0 .. m - 1 { + let member = account("member", i, SEED); + members.push(member); + } + let caller: T::AccountId = account("caller", 0, SEED); + members.push(caller.clone()); + Collective::::set_members( + SystemOrigin::Root.into(), + members.clone(), + Some(caller.clone()), + T::MaxMembers::get(), + )?; + + // Threshold is one less than total members so that two nays will disapprove the vote + let threshold = m - 1; + + // Add proposals + let mut last_hash = T::Hash::default(); + for i in 0 .. p { + // Proposals should be different so that different proposal hashes are generated + let proposal: T::Proposal = SystemCall::::remark(vec![i as u8; b as usize]).into(); + Collective::::propose( + SystemOrigin::Signed(caller.clone()).into(), + threshold, + Box::new(proposal.clone()), + bytes_in_storage, + )?; + last_hash = T::Hashing::hash_of(&proposal); + } + + System::::set_block_number(T::BlockNumber::max_value()); + assert_eq!(Collective::::proposals().len(), p as usize); + + }: _(SystemOrigin::Root, last_hash) + verify { + assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_last_event::(RawEvent::Disapproved(last_hash).into()); + } } #[cfg(test)] @@ -649,4 +703,11 @@ mod tests { assert_ok!(test_benchmark_close_approved::()); }); } + + #[test] + fn disapprove_proposal() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_disapprove_proposal::()); + }); + } } diff --git a/frame/collective/src/default_weight.rs b/frame/collective/src/default_weight.rs new file mode 100644 index 0000000000000000000000000000000000000000..bb6fe0ea25312d63e1f6b26fe7c56724734dc1d4 --- /dev/null +++ b/frame/collective/src/default_weight.rs @@ -0,0 +1,97 @@ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Default weights for the Collective Pallet +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn set_members(m: u32, n: u32, p: u32, ) -> Weight { + (0 as Weight) + .saturating_add((21040000 as Weight).saturating_mul(m as Weight)) + .saturating_add((173000 as Weight).saturating_mul(n as Weight)) + .saturating_add((31595000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(DbWeight::get().writes(2 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + } + fn execute(b: u32, m: u32, ) -> Weight { + (43359000 as Weight) + .saturating_add((4000 as Weight).saturating_mul(b as Weight)) + .saturating_add((123000 as Weight).saturating_mul(m as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + } + fn propose_execute(b: u32, m: u32, ) -> Weight { + (54134000 as Weight) + .saturating_add((4000 as Weight).saturating_mul(b as Weight)) + .saturating_add((239000 as Weight).saturating_mul(m as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + } + fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { + (90650000 as Weight) + .saturating_add((5000 as Weight).saturating_mul(b as Weight)) + .saturating_add((152000 as Weight).saturating_mul(m as Weight)) + .saturating_add((970000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(4 as Weight)) + } + fn vote(m: u32, ) -> Weight { + (74460000 as Weight) + .saturating_add((290000 as Weight).saturating_mul(m as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn close_early_disapproved(m: u32, p: u32, ) -> Weight { + (86360000 as Weight) + .saturating_add((232000 as Weight).saturating_mul(m as Weight)) + .saturating_add((954000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { + (123653000 as Weight) + .saturating_add((1000 as Weight).saturating_mul(b as Weight)) + .saturating_add((287000 as Weight).saturating_mul(m as Weight)) + .saturating_add((920000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn close_disapproved(m: u32, p: u32, ) -> Weight { + (95395000 as Weight) + .saturating_add((236000 as Weight).saturating_mul(m as Weight)) + .saturating_add((965000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { + (135284000 as Weight) + 
.saturating_add((4000 as Weight).saturating_mul(b as Weight)) + .saturating_add((218000 as Weight).saturating_mul(m as Weight)) + .saturating_add((951000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(5 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn disapprove_proposal(p: u32, ) -> Weight { + (50500000 as Weight) + .saturating_add((966000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } +} diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 1edd8c75b90b0c1023e78d187be26d00cc0302e0..dd44f5e2aea9ec15f7205e4834761af8f4ff6907 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -20,11 +20,14 @@ //! //! The membership can be provided in one of two ways: either directly, using the Root-dispatchable //! function `set_members`, or indirectly, through implementing the `ChangeMembers`. -//! The pallet assumes that the amount of members stays at or below `MAX_MEMBERS` for its weight +//! The pallet assumes that the amount of members stays at or below `MaxMembers` for its weight //! calculations, but enforces this neither in `set_members` nor in `change_members_sorted`. //! -//! A "prime" member may be set allowing their vote to act as the default vote in case of any -//! abstentions after the voting period. +//! A "prime" member may be set to help determine the default vote behavior based on chain +//! config. If `PrimeDefaultVote` is used, the prime vote acts as the default vote in case of any +//! abstentions after the voting period. If `MoreThanMajorityThenPrimeDefaultVote` is used, then +//! abstentions will first follow the majority of the collective voting, and then the prime +//! member. //! //! Voting happens through motions comprising a proposal (i.e. a curried dispatchable) plus a //! number of approvals required for it to pass and be called. Motions are open for members to @@ -60,6 +63,8 @@ use frame_system::{self as system, ensure_signed, ensure_root}; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +mod default_weight; + /// Simple index type for proposal counting. pub type ProposalIndex = u32; @@ -69,35 +74,63 @@ pub type ProposalIndex = u32; /// vote exactly once, therefore also the number of votes for any given motion. pub type MemberCount = u32; -/// The maximum number of members supported by the pallet. Used for weight estimation. -/// -/// NOTE: -/// + Benchmarks will need to be re-run and weights adjusted if this changes. -/// + This pallet assumes that dependents keep to the limit without enforcing it. -pub const MAX_MEMBERS: MemberCount = 100; +/// Default voting strategy when a member is inactive. +pub trait DefaultVote { + /// Get the default voting strategy, given: + /// + /// - Whether the prime member voted Aye. + /// - Raw number of yes votes. + /// - Raw number of no votes. + /// - Total number of members. + fn default_vote( + prime_vote: Option<bool>, + yes_votes: MemberCount, + no_votes: MemberCount, + len: MemberCount, + ) -> bool; +} + +/// Set the prime member's vote as the default vote. +pub struct PrimeDefaultVote; + +impl DefaultVote for PrimeDefaultVote { + fn default_vote( + prime_vote: Option<bool>, + _yes_votes: MemberCount, + _no_votes: MemberCount, + _len: MemberCount, + ) -> bool { + prime_vote.unwrap_or(false) + } +} + +/// First see if the yes votes make up a majority of the whole collective. If so, set the default vote +/// as yes. Otherwise, use the prime member's vote as the default vote. +pub struct MoreThanMajorityThenPrimeDefaultVote; + +impl DefaultVote for MoreThanMajorityThenPrimeDefaultVote { + fn default_vote( + prime_vote: Option<bool>, + yes_votes: MemberCount, + _no_votes: MemberCount, + len: MemberCount, + ) -> bool { + let more_than_majority = yes_votes * 2 > len; + more_than_majority || prime_vote.unwrap_or(false) + } +} pub trait WeightInfo { fn set_members(m: u32, n: u32, p: u32, ) -> Weight; - fn execute(m: u32, b: u32, ) -> Weight; - fn propose_execute(m: u32, b: u32, ) -> Weight; - fn propose_proposed(m: u32, p: u32, b: u32, ) -> Weight; + fn execute(b: u32, m: u32, ) -> Weight; + fn propose_execute(b: u32, m: u32, ) -> Weight; + fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight; fn vote(m: u32, ) -> Weight; - fn close_early_disapproved(m: u32, p: u32, b: u32, ) -> Weight; - fn close_early_approved(m: u32, p: u32, b: u32, ) -> Weight; - fn close_disapproved(m: u32, p: u32, b: u32, ) -> Weight; - fn close_approved(m: u32, p: u32, b: u32, ) -> Weight; -} - -impl WeightInfo for () { - fn set_members(_m: u32, _n: u32, _p: u32, ) -> Weight { 1_000_000_000 } - fn execute(_m: u32, _b: u32, ) -> Weight { 1_000_000_000 } - fn propose_execute(_m: u32, _b: u32, ) -> Weight { 1_000_000_000 } - fn propose_proposed(_m: u32, _p: u32, _b: u32, ) -> Weight { 1_000_000_000 } - fn vote(_m: u32, ) -> Weight { 1_000_000_000 } - fn close_early_disapproved(_m: u32, _p: u32, _b: u32, ) -> Weight { 1_000_000_000 } - fn close_early_approved(_m: u32, _p: u32, _b: u32, ) -> Weight { 1_000_000_000 } - fn close_disapproved(_m: u32, _p: u32, _b: u32, ) -> Weight { 1_000_000_000 } - fn close_approved(_m: u32, _p: u32, _b: u32, ) -> Weight { 1_000_000_000 } + fn close_early_disapproved(m: u32, p: u32, ) -> Weight; + fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight; + fn close_disapproved(m: u32, p: u32, ) -> Weight; + fn close_approved(b: u32, m: u32, p: u32, ) -> Weight; + fn disapprove_proposal(p: u32, ) -> Weight; } pub trait Trait: frame_system::Trait { @@ -117,7 +150,17 @@ pub trait Trait: frame_system::Trait { type MotionDuration: Get; /// Maximum number of proposals allowed to be active in parallel. - type MaxProposals: Get; + type MaxProposals: Get; + + /// The maximum number of members supported by the pallet. Used for weight estimation. + /// + /// NOTE: + /// + Benchmarks will need to be re-run and weights adjusted if this changes. + /// + This pallet assumes that dependents keep to the limit without enforcing it. + type MaxMembers: Get; + + /// Default vote strategy of this collective. + type DefaultVote: DefaultVote; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; @@ -166,8 +209,7 @@ decl_storage! { pub ProposalCount get(fn proposal_count): u32; /// The current members of the collective. This is stored sorted (just by value). pub Members get(fn members): Vec; - /// The member who provides the default vote for any other members that do not vote before - /// the timeout. If None, then no member has that privilege. + /// The prime member that helps determine the default vote behavior in case of abstentions. pub Prime get(fn prime): Option; } add_extra_genesis { @@ -184,26 +226,26 @@ decl_event! { { /// A motion (given hash) has been proposed (by given account) with a threshold (given /// `MemberCount`).
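The two strategies above differ only in how abstentions are resolved when a motion is closed after its voting period. The following std-only sketch copies the `DefaultVote` trait and impls from this diff so the difference is easy to see on a single tally; it is an illustration, not runtime code.

```rust
// Std-only illustration of the two `DefaultVote` strategies introduced above,
// copied from the trait/impls in this diff.
type MemberCount = u32;

trait DefaultVote {
    fn default_vote(prime_vote: Option<bool>, yes_votes: MemberCount, no_votes: MemberCount, len: MemberCount) -> bool;
}

struct PrimeDefaultVote;
impl DefaultVote for PrimeDefaultVote {
    // Abstentions follow the prime member; no recorded prime aye means nay.
    fn default_vote(prime_vote: Option<bool>, _yes: MemberCount, _no: MemberCount, _len: MemberCount) -> bool {
        prime_vote.unwrap_or(false)
    }
}

struct MoreThanMajorityThenPrimeDefaultVote;
impl DefaultVote for MoreThanMajorityThenPrimeDefaultVote {
    // If ayes already exceed half of the whole collective, abstentions count as aye;
    // otherwise fall back to the prime member's vote.
    fn default_vote(prime_vote: Option<bool>, yes_votes: MemberCount, _no: MemberCount, len: MemberCount) -> bool {
        yes_votes * 2 > len || prime_vote.unwrap_or(false)
    }
}

fn main() {
    // 3 of 5 members voted aye, none voted nay, and no prime aye was recorded.
    let (yes, no, len) = (3, 0, 5);
    assert_eq!(PrimeDefaultVote::default_vote(None, yes, no, len), false);
    assert_eq!(MoreThanMajorityThenPrimeDefaultVote::default_vote(None, yes, no, len), true);
    println!("majority-then-prime treats the two abstentions as ayes");
}
```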
- /// [account, proposal_index, proposal_hash, threshold] + /// \[account, proposal_index, proposal_hash, threshold\] Proposed(AccountId, ProposalIndex, Hash, MemberCount), /// A motion (given hash) has been voted on by given account, leaving /// a tally (yes votes and no votes given respectively as `MemberCount`). - /// [account, proposal_hash, voted, yes, no] + /// \[account, proposal_hash, voted, yes, no\] Voted(AccountId, Hash, bool, MemberCount, MemberCount), /// A motion was approved by the required threshold. - /// [proposal_hash] + /// \[proposal_hash\] Approved(Hash), /// A motion was not approved by the required threshold. - /// [proposal_hash] + /// \[proposal_hash\] Disapproved(Hash), /// A motion was executed; result will be `Ok` if it returned without error. - /// [proposal_hash, result] + /// \[proposal_hash, result\] Executed(Hash, DispatchResult), /// A single member did some action; result will be `Ok` if it returned without error. - /// [proposal_hash, result] + /// \[proposal_hash, result\] MemberExecuted(Hash, DispatchResult), /// A proposal was closed because its threshold was reached or after its duration was up. - /// [proposal_hash, yes, no] + /// \[proposal_hash, yes, no\] Closed(Hash, MemberCount, MemberCount), } } @@ -233,131 +275,6 @@ decl_error! { } } -/// Functions for calcuating the weight of dispatchables. -mod weight_for { - use frame_support::{traits::Get, weights::Weight}; - use super::{Trait, Instance}; - - /// Calculate the weight for `set_members`. - /// - /// Based on benchmark: - /// 0 + M * 20.47 + N * 0.109 + P * 26.29 µs (min squares analysis) - /// - /// Note: The complexity of `set_members` is quadratic (`O(MP + N)`), so the linear approximation - /// of the benchmark is not always permissible. It is here, though, because the linear approximation - /// covered the range of possible values and we estimate weight via the worst case (max paramter - /// values) before execution so we can be sure that we are only overestimating. - pub(crate) fn set_members, I: Instance>( - old_count: Weight, - new_count: Weight, - proposals: Weight, - ) -> Weight { - let db = T::DbWeight::get(); - db.reads_writes(1, 1) // mutate `Members` - .saturating_add(db.writes(1)) // set `Prime` - .saturating_add(db.reads(1)) // read `Proposals` - .saturating_add(db.reads_writes(proposals, proposals)) // update votes (`Voting`) - .saturating_add(old_count.saturating_mul(21_000_000)) // M - .saturating_add(new_count.saturating_mul(110_000)) // N - .saturating_add(proposals.saturating_mul(27_000_000)) // P - } - - /// Calculate the weight for `execute`. - /// - /// Based on benchmark: - /// 22.62 + M * 0.115 + B * 0.003 µs (min squares analysis) - pub(crate) fn execute, I: Instance>( - members: Weight, - proposal: Weight, - length: Weight, - ) -> Weight { - T::DbWeight::get().reads(1) // read members for `is_member` - .saturating_add(23_000_000) // constant - .saturating_add(length.saturating_mul(4_000)) // B - .saturating_add(members.saturating_mul(120_000)) // M - .saturating_add(proposal) // P - } - - /// Calculate the weight for `propose` if the proposal is executed straight away (`threshold < 2`). 
- /// - /// Based on benchmark: - /// 28.12 + M * 0.218 + B * 0.003 µs (min squares analysis) - pub(crate) fn propose_execute, I: Instance>( - members: Weight, - proposal: Weight, - length: Weight, - ) -> Weight { - T::DbWeight::get().reads(2) // `is_member` + `contains_key` - .saturating_add(29_000_000) // constant - .saturating_add(length.saturating_mul(3_000)) // B - .saturating_add(members.saturating_mul(220_000)) // M - .saturating_add(proposal) // P1 - } - - /// Calculate the weight for `propose` if the proposal is put up for a vote (`threshold >= 2`). - /// - /// Based on benchmark: - /// 49.75 + M * 0.105 + P2 0.502 + B * 0.006 µs (min squares analysis) - pub(crate) fn propose_proposed, I: Instance>( - members: Weight, - proposals: Weight, - length: Weight, - ) -> Weight { - T::DbWeight::get().reads(2) // `is_member` + `contains_key` - .saturating_add(T::DbWeight::get().reads_writes(2, 4)) // `proposal` insertion - .saturating_add(50_000_000) // constant - .saturating_add(length.saturating_mul(6_000)) // B - .saturating_add(members.saturating_mul(110_000)) // M - .saturating_add(proposals.saturating_mul(510_000)) // P2 - } - - /// Calculate the weight for `vote`. - /// - /// Based on benchmark: - /// 24.03 + M * 0.349 + P * 0.119 + B * 0.003 µs (min squares analysis) - pub(crate) fn vote, I: Instance>( - members: Weight, - ) -> Weight { - T::DbWeight::get().reads(1) // read `Members` - .saturating_add(T::DbWeight::get().reads_writes(1, 1)) // mutate `Voting` - .saturating_add(30_000_000) // constant - .saturating_add(members.saturating_mul(500_000)) // M - } - - /// Calculate the weight for `close`. - /// - /// Based on benchmarks: - /// - early disapproved: 37.21 + M * 0.239 + P2 * 0.466 + B * 0.002 µs (min squares analysis) - /// - early approved: 50.82 + M * 0.211 + P2 * 0.478 + B * 0.008 µs (min squares analysis) - /// - disapproved: 51.08 + M * 0.224 + P2 * 0.475 + B * 0.003 µs (min squares analysis) - /// - approved: 65.95 + M * 0.226 + P2 * 0.487 + B * 0.005 µs (min squares analysis) - pub(crate) fn close, I: Instance>( - members: Weight, - proposal_weight: Weight, - proposals: Weight, - length: Weight, - ) -> Weight { - let db = T::DbWeight::get(); - close_without_finalize::(members, length) - .saturating_add(db.reads(1)) // `Prime` - .saturating_add(db.writes(1)) // `Proposals` - .saturating_add(db.writes(1)) // `Voting` - .saturating_add(proposal_weight) // P1 - .saturating_add(proposals.saturating_mul(490_000)) // P2 - } - - /// Calculate the weight for `close` without the call to `approve/disapprove_proposal`. - pub(crate) fn close_without_finalize, I: Instance>( - members: Weight, - length: Weight, - ) -> Weight { - T::DbWeight::get().reads(3) // `Members`, `Voting`, `ProposalOf` - .saturating_add(66_000_000) // constant - .saturating_add(length.saturating_mul(8_000)) // B - .saturating_add(members.saturating_mul(250_000)) // M - } -} - /// Return the weight of a dispatch call result as an `Option`. /// /// Will return the weight regardless of what the state of the result is. @@ -385,7 +302,7 @@ decl_module! { /// /// Requires root origin. /// - /// NOTE: Does not enforce the expected `MAX_MEMBERS` limit on the amount of members, but + /// NOTE: Does not enforce the expected `MaxMembers` limit on the amount of members, but /// the weight estimations rely on it to estimate dispatchable weight. /// /// # @@ -401,10 +318,10 @@ decl_module! 
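The hand-written `weight_for` helpers removed above are replaced by the benchmarked `WeightInfo` formulas in `default_weight.rs`. As a rough sanity check, the sketch below evaluates the generated `set_members(m, n, p)` formula for the worst case used pre-dispatch. The `READ`/`WRITE` constants are assumptions standing in for the stock `RocksDbWeight` values (roughly 25µs per read and 100µs per write in picosecond-based weight units); the real values come from the runtime configuration, so the printed number is only indicative.

```rust
// Hand evaluation of `WeightInfo::set_members(m, n, p)` from default_weight.rs above.
type Weight = u64;

// Assumed per-operation DB weights (approximate stock RocksDbWeight constants).
const READ: Weight = 25_000_000;
const WRITE: Weight = 100_000_000;

fn set_members_weight(m: Weight, n: Weight, p: Weight) -> Weight {
    0u64
        .saturating_add(21_040_000u64.saturating_mul(m))
        .saturating_add(173_000u64.saturating_mul(n))
        .saturating_add(31_595_000u64.saturating_mul(p))
        .saturating_add(READ.saturating_mul(2))   // base reads
        .saturating_add(READ.saturating_mul(p))   // one read per proposal
        .saturating_add(WRITE.saturating_mul(2))  // base writes
        .saturating_add(WRITE.saturating_mul(p))  // one write per proposal
}

fn main() {
    // Worst case used pre-dispatch: MaxMembers = MaxProposals = 100.
    let (m, n, p) = (100, 100, 100);
    let w = set_members_weight(m, n, p);
    println!("worst-case set_members weight ~ {} (~{} ms at 10^12 units/s)", w, w / 1_000_000_000);
}
```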
{ /// - 1 storage write (codec `O(1)`) for deleting the old `prime` and setting the new one /// # #[weight = ( - weight_for::set_members::( - (*old_count).into(), // M - new_members.len() as Weight, // N - T::MaxProposals::get().into(), // P + T::WeightInfo::set_members( + *old_count, // M + new_members.len() as u32, // N + T::MaxProposals::get() // P ), DispatchClass::Operational )] @@ -414,10 +331,10 @@ decl_module! { old_count: MemberCount, ) -> DispatchResultWithPostInfo { ensure_root(origin)?; - if new_members.len() > MAX_MEMBERS as usize { + if new_members.len() > T::MaxMembers::get() as usize { debug::error!( "New members count exceeds maximum amount of members expected. (expected: {}, actual: {})", - MAX_MEMBERS, + T::MaxMembers::get(), new_members.len() ); } @@ -435,10 +352,10 @@ decl_module! { >::set_members_sorted(&new_members, &old); Prime::::set(prime); - Ok(Some(weight_for::set_members::( - old.len() as Weight, // M - new_members.len() as Weight, // N - T::MaxProposals::get().into(), // P + Ok(Some(T::WeightInfo::set_members( + old.len() as u32, // M + new_members.len() as u32, // N + T::MaxProposals::get(), // P )).into()) } @@ -453,11 +370,10 @@ decl_module! { /// - 1 event /// # #[weight = ( - weight_for::execute::( - MAX_MEMBERS.into(), - proposal.get_dispatch_info().weight, - *length_bound as Weight, - ), + T::WeightInfo::execute( + *length_bound, // B + T::MaxMembers::get(), // M + ).saturating_add(proposal.get_dispatch_info().weight), // P DispatchClass::Operational )] fn execute(origin, @@ -476,11 +392,12 @@ decl_module! { RawEvent::MemberExecuted(proposal_hash, result.map(|_| ()).map_err(|e| e.error)) ); - Ok(get_result_weight(result).map(|w| weight_for::execute::( - members.len() as Weight, - w, - proposal_len as Weight - )).into()) + Ok(get_result_weight(result).map(|w| { + T::WeightInfo::execute( + proposal_len as u32, // B + members.len() as u32, // M + ).saturating_add(w) // P + }).into()) } /// Add a new proposal to either be voted on or executed directly. @@ -512,16 +429,15 @@ decl_module! { /// # #[weight = ( if *threshold < 2 { - weight_for::propose_execute::( - MAX_MEMBERS.into(), // M - proposal.get_dispatch_info().weight, // P1 - *length_bound as Weight, // B - ) + T::WeightInfo::propose_execute( + *length_bound, // B + T::MaxMembers::get(), // M + ).saturating_add(proposal.get_dispatch_info().weight) // P1 } else { - weight_for::propose_proposed::( - MAX_MEMBERS.into(), // M - T::MaxProposals::get().into(), // P2 - *length_bound as Weight, // B + T::WeightInfo::propose_proposed( + *length_bound, // B + T::MaxMembers::get(), // M + T::MaxProposals::get(), // P2 ) }, DispatchClass::Operational @@ -547,11 +463,12 @@ decl_module! { RawEvent::Executed(proposal_hash, result.map(|_| ()).map_err(|e| e.error)) ); - Ok(get_result_weight(result).map(|w| weight_for::propose_execute::( - members.len() as Weight, // M - w, // P1 - proposal_len as Weight, // B - )).into()) + Ok(get_result_weight(result).map(|w| { + T::WeightInfo::propose_execute( + proposal_len as u32, // B + members.len() as u32, // M + ).saturating_add(w) // P1 + }).into()) } else { let active_proposals = >::try_mutate(|proposals| -> Result { @@ -571,10 +488,10 @@ decl_module! 
{ Self::deposit_event(RawEvent::Proposed(who, index, proposal_hash, threshold)); - Ok(Some(weight_for::propose_proposed::( - members.len() as Weight, // M - active_proposals as Weight, // P2 - proposal_len as Weight, // B + Ok(Some(T::WeightInfo::propose_proposed( + proposal_len as u32, // B + members.len() as u32, // M + active_proposals as u32, // P2 )).into()) } } @@ -592,7 +509,7 @@ decl_module! { /// - 1 event /// # #[weight = ( - weight_for::vote::(MAX_MEMBERS.into()), + T::WeightInfo::vote(T::MaxMembers::get()), DispatchClass::Operational )] fn vote(origin, @@ -636,7 +553,7 @@ decl_module! { Voting::::insert(&proposal, voting); - Ok(Some(weight_for::vote::(members.len() as Weight)).into()) + Ok(Some(T::WeightInfo::vote(members.len() as u32)).into()) } /// Close a vote that is either approved, disapproved or whose voting period has ended. @@ -667,12 +584,17 @@ decl_module! { /// - up to 3 events /// # #[weight = ( - weight_for::close::( - MAX_MEMBERS.into(), // `M` - *proposal_weight_bound, // `P1` - T::MaxProposals::get().into(), // `P2` - *length_bound as Weight, // B - ), + { + let b = *length_bound; + let m = T::MaxMembers::get(); + let p1 = *proposal_weight_bound; + let p2 = T::MaxProposals::get(); + T::WeightInfo::close_early_approved(b, m, p2) + .max(T::WeightInfo::close_early_disapproved(m, p2)) + .max(T::WeightInfo::close_approved(b, m, p2)) + .max(T::WeightInfo::close_disapproved(m, p2)) + .saturating_add(p1) + }, DispatchClass::Operational )] fn close(origin, @@ -699,25 +621,27 @@ decl_module! { proposal_weight_bound )?; Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); - let approve_weight = Self::do_approve_proposal(seats, voting, proposal_hash, proposal); + let (proposal_weight, proposal_count) = + Self::do_approve_proposal(seats, voting, proposal_hash, proposal); return Ok(Some( - weight_for::close_without_finalize::(seats.into(), len as Weight) - .saturating_add(approve_weight) + T::WeightInfo::close_early_approved(len as u32, seats, proposal_count) + .saturating_add(proposal_weight) ).into()); } else if disapproved { Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); - let disapprove_weight = Self::do_disapprove_proposal(proposal_hash); + let proposal_count = Self::do_disapprove_proposal(proposal_hash); return Ok(Some( - weight_for::close_without_finalize::(seats.into(), 0) - .saturating_add(disapprove_weight) + T::WeightInfo::close_early_disapproved(seats, proposal_count) ).into()); } // Only allow actual closing of the proposal after the voting period has ended. ensure!(system::Module::::block_number() >= voting.end, Error::::TooEarly); - // default to true only if there's a prime and they voted in favour. - let default = Self::prime().map_or(false, |who| voting.ayes.iter().any(|a| a == &who)); + let prime_vote = Self::prime().map(|who| voting.ayes.iter().any(|a| a == &who)); + + // default voting strategy. + let default = T::DefaultVote::default_vote(prime_vote, yes_votes, no_votes, seats); let abstentions = seats - (yes_votes + no_votes); match default { @@ -733,19 +657,17 @@ decl_module! 
{ proposal_weight_bound )?; Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); - let approve_weight = Self::do_approve_proposal(seats, voting, proposal_hash, proposal); + let (proposal_weight, proposal_count) = + Self::do_approve_proposal(seats, voting, proposal_hash, proposal); return Ok(Some( - weight_for::close_without_finalize::(seats.into(), len as Weight) - .saturating_add(T::DbWeight::get().reads(1)) // read `Prime` - .saturating_add(approve_weight) + T::WeightInfo::close_approved(len as u32, seats, proposal_count) + .saturating_add(proposal_weight) ).into()); } else { Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); - let disapprove_weight = Self::do_disapprove_proposal(proposal_hash); + let proposal_count = Self::do_disapprove_proposal(proposal_hash); return Ok(Some( - weight_for::close_without_finalize::(seats.into(), 0) - .saturating_add(T::DbWeight::get().reads(1)) // read `Prime` - .saturating_add(disapprove_weight) + T::WeightInfo::close_disapproved(seats, proposal_count) ).into()); } } @@ -759,18 +681,15 @@ decl_module! { /// /// # /// Complexity: O(P) where P is the number of max proposals - /// Base Weight: .49 * P /// DB Weight: /// * Reads: Proposals /// * Writes: Voting, Proposals, ProposalOf /// # - #[weight = T::DbWeight::get().reads_writes(1, 3) // `Voting`, `Proposals`, `ProposalOf` - .saturating_add(490_000 * Weight::from(T::MaxProposals::get())) // P2 - ] + #[weight = T::WeightInfo::disapprove_proposal(T::MaxProposals::get())] fn disapprove_proposal(origin, proposal_hash: T::Hash) -> DispatchResultWithPostInfo { ensure_root(origin)?; - let actual_weight = Self::do_disapprove_proposal(proposal_hash); - Ok(Some(actual_weight).into()) + let proposal_count = Self::do_disapprove_proposal(proposal_hash); + Ok(Some(T::WeightInfo::disapprove_proposal(proposal_count)).into()) } } } @@ -822,8 +741,7 @@ impl, I: Instance> Module { voting: Votes, proposal_hash: T::Hash, proposal: >::Proposal, - ) -> Weight { - let mut weight: Weight = 0; + ) -> (Weight, u32) { Self::deposit_event(RawEvent::Approved(proposal_hash)); let dispatch_weight = proposal.get_dispatch_info().weight; @@ -832,23 +750,21 @@ impl, I: Instance> Module { Self::deposit_event( RawEvent::Executed(proposal_hash, result.map(|_| ()).map_err(|e| e.error)) ); - weight = weight.saturating_add( - // default to the dispatch info weight for safety - get_result_weight(result).unwrap_or(dispatch_weight) // P1 - ); + // default to the dispatch info weight for safety + let proposal_weight = get_result_weight(result).unwrap_or(dispatch_weight); // P1 - let remove_proposal_weight = Self::remove_proposal(proposal_hash); - weight.saturating_add(remove_proposal_weight) + let proposal_count = Self::remove_proposal(proposal_hash); + (proposal_weight, proposal_count) } - fn do_disapprove_proposal(proposal_hash: T::Hash) -> Weight { + fn do_disapprove_proposal(proposal_hash: T::Hash) -> u32 { // disapproved Self::deposit_event(RawEvent::Disapproved(proposal_hash)); Self::remove_proposal(proposal_hash) } // Removes a proposal from the pallet, cleaning up votes and the vector of proposals. 
- fn remove_proposal(proposal_hash: T::Hash) -> Weight { + fn remove_proposal(proposal_hash: T::Hash) -> u32 { // remove proposal and vote ProposalOf::::remove(&proposal_hash); Voting::::remove(&proposal_hash); @@ -856,15 +772,14 @@ impl, I: Instance> Module { proposals.retain(|h| h != &proposal_hash); proposals.len() + 1 // calculate weight based on original length }); - T::DbWeight::get().reads_writes(1, 3) // `Voting`, `Proposals`, `ProposalOf` - .saturating_add(490_000 * num_proposals as Weight) // P2 + num_proposals as u32 } } impl, I: Instance> ChangeMembers for Module { /// Update the members of the collective. Votes are updated and the prime is reset. /// - /// NOTE: Does not enforce the expected `MAX_MEMBERS` limit on the amount of members, but + /// NOTE: Does not enforce the expected `MaxMembers` limit on the amount of members, but /// the weight estimations rely on it to estimate dispatchable weight. /// /// # @@ -884,10 +799,10 @@ impl, I: Instance> ChangeMembers for Module { outgoing: &[T::AccountId], new: &[T::AccountId], ) { - if new.len() > MAX_MEMBERS as usize { + if new.len() > T::MaxMembers::get() as usize { debug::error!( "New members count exceeds maximum amount of members expected. (expected: {}, actual: {})", - MAX_MEMBERS, + T::MaxMembers::get(), new.len() ); } @@ -1047,6 +962,7 @@ mod tests { pub const AvailableBlockRatio: Perbill = Perbill::one(); pub const MotionDuration: u64 = 3; pub const MaxProposals: u32 = 100; + pub const MaxMembers: u32 = 100; } impl frame_system::Trait for Test { type BaseCallFilter = (); @@ -1069,7 +985,7 @@ mod tests { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); @@ -1081,6 +997,18 @@ mod tests { type Event = Event; type MotionDuration = MotionDuration; type MaxProposals = MaxProposals; + type MaxMembers = MaxMembers; + type DefaultVote = PrimeDefaultVote; + type WeightInfo = (); + } + impl Trait for Test { + type Origin = Origin; + type Proposal = Call; + type Event = Event; + type MotionDuration = MotionDuration; + type MaxProposals = MaxProposals; + type MaxMembers = MaxMembers; + type DefaultVote = MoreThanMajorityThenPrimeDefaultVote; type WeightInfo = (); } impl Trait for Test { @@ -1089,6 +1017,8 @@ mod tests { type Event = Event; type MotionDuration = MotionDuration; type MaxProposals = MaxProposals; + type MaxMembers = MaxMembers; + type DefaultVote = PrimeDefaultVote; type WeightInfo = (); } @@ -1103,6 +1033,7 @@ mod tests { { System: system::{Module, Call, Event}, Collective: collective::::{Module, Call, Event, Origin, Config}, + CollectiveMajority: collective::::{Module, Call, Event, Origin, Config}, DefaultCollective: collective::{Module, Call, Event, Origin, Config}, } ); @@ -1113,12 +1044,20 @@ mod tests { members: vec![1, 2, 3], phantom: Default::default(), }), + collective_Instance2: Some(collective::GenesisConfig { + members: vec![1, 2, 3, 4, 5], + phantom: Default::default(), + }), collective: None, }.build_storage().unwrap().into(); ext.execute_with(|| System::set_block_number(1)); ext } + fn make_proposal(value: u64) -> Call { + Call::System(frame_system::Call::remark(value.encode())) + } + #[test] fn motions_basic_environment_works() { new_test_ext().execute_with(|| { @@ -1127,10 +1066,6 @@ mod tests { }); } - fn make_proposal(value: u64) -> Call { - Call::System(frame_system::Call::remark(value.encode())) - } - #[test] fn 
close_works() { new_test_ext().execute_with(|| { @@ -1164,7 +1099,7 @@ mod tests { #[test] fn proposal_weight_limit_works_on_approve() { new_test_ext().execute_with(|| { - let proposal = Call::Collective(crate::Call::set_members(vec![1, 2, 3], None, MAX_MEMBERS)); + let proposal = Call::Collective(crate::Call::set_members(vec![1, 2, 3], None, MaxMembers::get())); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); @@ -1184,7 +1119,7 @@ mod tests { #[test] fn proposal_weight_limit_ignored_on_disapprove() { new_test_ext().execute_with(|| { - let proposal = Call::Collective(crate::Call::set_members(vec![1, 2, 3], None, MAX_MEMBERS)); + let proposal = Call::Collective(crate::Call::set_members(vec![1, 2, 3], None, MaxMembers::get())); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); @@ -1205,7 +1140,7 @@ mod tests { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::set_members(Origin::root(), vec![1, 2, 3], Some(3), MAX_MEMBERS)); + assert_ok!(Collective::set_members(Origin::root(), vec![1, 2, 3], Some(3), MaxMembers::get())); assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); @@ -1230,7 +1165,7 @@ mod tests { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::set_members(Origin::root(), vec![1, 2, 3], Some(1), MAX_MEMBERS)); + assert_ok!(Collective::set_members(Origin::root(), vec![1, 2, 3], Some(1), MaxMembers::get())); assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); @@ -1249,6 +1184,34 @@ mod tests { }); } + #[test] + fn close_with_no_prime_but_majority_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let proposal_weight = proposal.get_dispatch_info().weight; + let hash = BlakeTwo256::hash_of(&proposal); + assert_ok!(CollectiveMajority::set_members(Origin::root(), vec![1, 2, 3, 4, 5], Some(5), MaxMembers::get())); + + assert_ok!(CollectiveMajority::propose(Origin::signed(1), 5, Box::new(proposal.clone()), proposal_len)); + assert_ok!(CollectiveMajority::vote(Origin::signed(2), hash.clone(), 0, true)); + assert_ok!(CollectiveMajority::vote(Origin::signed(3), hash.clone(), 0, true)); + + System::set_block_number(4); + assert_ok!(CollectiveMajority::close(Origin::signed(4), hash.clone(), 0, proposal_weight, proposal_len)); + + let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; + assert_eq!(System::events(), vec![ + record(Event::collective_Instance2(RawEvent::Proposed(1, 0, hash.clone(), 5))), + record(Event::collective_Instance2(RawEvent::Voted(2, hash.clone(), true, 2, 0))), + record(Event::collective_Instance2(RawEvent::Voted(3, hash.clone(), true, 3, 0))), + record(Event::collective_Instance2(RawEvent::Closed(hash.clone(), 5, 0))), + 
record(Event::collective_Instance2(RawEvent::Approved(hash.clone()))), + record(Event::collective_Instance2(RawEvent::Executed(hash.clone(), Err(DispatchError::BadOrigin)))) + ]); + }); + } + #[test] fn removal_of_old_voters_votes_works() { new_test_ext().execute_with(|| { @@ -1298,7 +1261,7 @@ mod tests { Collective::voting(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) ); - assert_ok!(Collective::set_members(Origin::root(), vec![2, 3, 4], None, MAX_MEMBERS)); + assert_ok!(Collective::set_members(Origin::root(), vec![2, 3, 4], None, MaxMembers::get())); assert_eq!( Collective::voting(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) @@ -1313,7 +1276,7 @@ mod tests { Collective::voting(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) ); - assert_ok!(Collective::set_members(Origin::root(), vec![2, 4], None, MAX_MEMBERS)); + assert_ok!(Collective::set_members(Origin::root(), vec![2, 4], None, MaxMembers::get())); assert_eq!( Collective::voting(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) @@ -1371,7 +1334,7 @@ mod tests { #[test] fn correct_validate_and_get_proposal() { new_test_ext().execute_with(|| { - let proposal = Call::Collective(crate::Call::set_members(vec![1, 2, 3], None, MAX_MEMBERS)); + let proposal = Call::Collective(crate::Call::set_members(vec![1, 2, 3], None, MaxMembers::get())); let length = proposal.encode().len() as u32; assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), length)); diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 1a42af6833b8bd7d4c7670a405e0ef0e8d89ce00..d18fed1bc8a3e4dac45eddc52e37fcb67588b689 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-contracts" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for WASM contracts" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,27 +15,27 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bitflags = "1.0" codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-contracts-primitives = { version = "2.0.0-rc5", default-features = false, path = "common" } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +pallet-contracts-primitives = { version = "2.0.0", default-features = false, path = "common" } parity-wasm = { version = "0.41.0", default-features = false } pwasm-utils = { version = "0.14.0", default-features = false } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path 
= "../../primitives/runtime" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-sandbox = { version = "0.8.0-rc5", default-features = false, path = "../../primitives/sandbox" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-sandbox = { version = "0.8.0", default-features = false, path = "../../primitives/sandbox" } wasmi-validation = { version = "0.3.0", default-features = false } wat = { version = "1.0", optional = true, default-features = false } [dev-dependencies] assert_matches = "1.3.0" -hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -pallet-timestamp = { version = "2.0.0-rc5", path = "../timestamp" } -pallet-randomness-collective-flip = { version = "2.0.0-rc5", path = "../randomness-collective-flip" } +hex-literal = "0.3.1" +pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-timestamp = { version = "2.0.0", path = "../timestamp" } +pallet-randomness-collective-flip = { version = "2.0.0", path = "../randomness-collective-flip" } pretty_assertions = "0.6.1" wat = "1.0" diff --git a/frame/contracts/README.md b/frame/contracts/README.md new file mode 100644 index 0000000000000000000000000000000000000000..dddcc3c8b8b85dc6e98fc8b4d19c50e86ed2e193 --- /dev/null +++ b/frame/contracts/README.md @@ -0,0 +1,64 @@ +# Contract Module + +The Contract module provides functionality for the runtime to deploy and execute WebAssembly smart-contracts. + +- [`contract::Trait`](https://docs.rs/pallet-contracts/latest/pallet_contracts/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-contracts/latest/pallet_contracts/enum.Call.html) + +## Overview + +This module extends accounts based on the `Currency` trait to have smart-contract functionality. It can +be used with other modules that implement accounts based on `Currency`. These "smart-contract accounts" +have the ability to instantiate smart-contracts and make calls to other contract and non-contract accounts. + +The smart-contract code is stored once in a `code_cache`, and later retrievable via its `code_hash`. +This means that multiple smart-contracts can be instantiated from the same `code_cache`, without replicating +the code each time. + +When a smart-contract is called, its associated code is retrieved via the code hash and gets executed. +This call can alter the storage entries of the smart-contract account, instantiate new smart-contracts, +or call other smart-contracts. + +Finally, when an account is reaped, its associated code and storage of the smart-contract account +will also be deleted. + +### Gas + +Senders must specify a gas limit with every call, as all instructions invoked by the smart-contract require gas. +Unused gas is refunded after the call, regardless of the execution outcome. + +If the gas limit is reached, then all calls and state changes (including balance transfers) are only +reverted at the current call's contract level. For example, if contract A calls B and B runs out of gas mid-call, +then all of B's calls are reverted. 
Assuming correct error handling by contract A, A's other calls and state +changes still persist. + +### Notable Scenarios + +Contract call failures are not always cascading. When failures occur in a sub-call, they do not "bubble up", +and the call will only revert at the specific contract level. For example, if contract A calls contract B, and B +fails, A can decide how to handle that failure, either proceeding or reverting A's changes. + +## Interface + +### Dispatchable functions + +* `put_code` - Stores the given binary Wasm code into the chain's storage and returns its `code_hash`. +* `instantiate` - Deploys a new contract from the given `code_hash`, optionally transferring some balance. +This instantiates a new smart contract account and calls its contract deploy handler to +initialize the contract. +* `call` - Makes a call to an account, optionally transferring some balance. + +## Usage + +The Contract module is a work in progress. The following examples show how this Contract module +can be used to instantiate and call contracts. + +* [`ink`](https://github.com/paritytech/ink) is +an [`eDSL`](https://wiki.haskell.org/Embedded_domain_specific_language) that enables writing +WebAssembly based smart contracts in the Rust programming language. This is a work in progress. + +## Related Modules + +* [Balances](https://docs.rs/pallet-balances/latest/pallet_balances/) + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/common/Cargo.toml index a8f5e40760097409279c1915f8d81fb2d132c7c7..753ef9c08122f8840514932f9fd17ce5e8b79919 100644 --- a/frame/contracts/common/Cargo.toml +++ b/frame/contracts/common/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-contracts-primitives" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "A crate that hosts a common definitions that are relevant for the pallet-contracts." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,8 +15,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # This crate should not rely on any of the frame primitives. codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } [features] default = ["std"] diff --git a/frame/contracts/common/README.md b/frame/contracts/common/README.md new file mode 100644 index 0000000000000000000000000000000000000000..12718cd86425b4820dc63ce1369c7e4d93d3bc93 --- /dev/null +++ b/frame/contracts/common/README.md @@ -0,0 +1,3 @@ +A crate that hosts a common definitions that are relevant for the pallet-contracts. 
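The contracts README above describes per-call revert semantics: a failing sub-call (for example one that runs out of gas) only rolls back its own changes, and the caller decides whether to proceed or revert its own state. The toy model below makes that concrete with plain `std` collections; it is an illustration of the described behavior, not of how pallet-contracts is actually implemented.

```rust
// Toy model of the per-call revert semantics described in the contracts README:
// contract A calls B; if B fails, only B's changes are discarded and A continues.
use std::collections::HashMap;

type Storage = HashMap<&'static str, u64>;

fn call_b(parent: &Storage, gas: u64) -> Result<Storage, &'static str> {
    // Work on a copy; it only becomes visible to the caller on success.
    let mut local = parent.clone();
    local.insert("b_value", 7);
    if gas < 10 {
        return Err("out of gas"); // everything B did is discarded
    }
    Ok(local)
}

fn call_a(storage: &mut Storage, gas_for_b: u64) {
    storage.insert("a_value", 1); // A's own state change
    match call_b(storage, gas_for_b) {
        Ok(updated) => *storage = updated, // commit B's changes
        Err(e) => println!("sub-call failed ({}), A keeps going", e),
    }
    storage.insert("a_after", 2); // A continues regardless of B's outcome
}

fn main() {
    let mut storage = Storage::new();
    call_a(&mut storage, 5); // B fails: only A's writes persist
    assert_eq!(storage.get("b_value"), None);
    assert!(storage.contains_key("a_value") && storage.contains_key("a_after"));
    println!("{:?}", storage);
}
```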
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml index c6b8fc8ac10dfcbc6a85606bed11d1c6f5c75c11..587abcbcddaec7b9c2833f856b106521fd089965 100644 --- a/frame/contracts/rpc/Cargo.toml +++ b/frame/contracts/rpc/Cargo.toml @@ -1,29 +1,30 @@ [package] name = "pallet-contracts-rpc" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Node-specific RPC methods for interaction with contracts." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4" } -jsonrpc-core = "14.2.0" -jsonrpc-core-client = "14.2.0" -jsonrpc-derive = "14.2.1" -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-rpc = { version = "2.0.0-rc5", path = "../../../primitives/rpc" } +jsonrpc-core = "15.0.0" +jsonrpc-core-client = "15.0.0" +jsonrpc-derive = "15.0.0" +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-rpc = { version = "2.0.0", path = "../../../primitives/rpc" } serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -pallet-contracts-primitives = { version = "2.0.0-rc5", path = "../common" } -pallet-contracts-rpc-runtime-api = { version = "0.8.0-rc5", path = "./runtime-api" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0", path = "../../../primitives/api" } +pallet-contracts-primitives = { version = "2.0.0", path = "../common" } +pallet-contracts-rpc-runtime-api = { version = "0.8.0", path = "./runtime-api" } [dev-dependencies] serde_json = "1.0.41" diff --git a/frame/contracts/rpc/README.md b/frame/contracts/rpc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..be6df237bf60d225a0b27b2f4e8d4651e6358016 --- /dev/null +++ b/frame/contracts/rpc/README.md @@ -0,0 +1,3 @@ +Node-specific RPC methods for interaction with contracts. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/rpc/runtime-api/Cargo.toml index a9266b986c6ca8430c7cd7d9d917f91416e5bc91..04becf2b45f4971464d8e09c6c3f855e4eccdb49 100644 --- a/frame/contracts/rpc/runtime-api/Cargo.toml +++ b/frame/contracts/rpc/runtime-api/Cargo.toml @@ -1,22 +1,23 @@ [package] name = "pallet-contracts-rpc-runtime-api" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Runtime API definition required by Contracts RPC extensions." 
+readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/api" } +sp-api = { version = "2.0.0", default-features = false, path = "../../../../primitives/api" } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/runtime" } -pallet-contracts-primitives = { version = "2.0.0-rc5", default-features = false, path = "../../common" } +sp-std = { version = "2.0.0", default-features = false, path = "../../../../primitives/std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../../../primitives/runtime" } +pallet-contracts-primitives = { version = "2.0.0", default-features = false, path = "../../common" } [features] default = ["std"] diff --git a/frame/contracts/rpc/runtime-api/README.md b/frame/contracts/rpc/runtime-api/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d57f29a93bd1dca89ea826a535d47036cb97efd7 --- /dev/null +++ b/frame/contracts/rpc/runtime-api/README.md @@ -0,0 +1,7 @@ +Runtime API definition required by Contracts RPC extensions. + +This API should be imported and implemented by the runtime, +of a node that wants to use the custom RPC extension +adding Contracts access methods. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 138c8e995a0a24eb6b4ec8c1812ef202e18e6252..4755573783af7fe75c211958b60bf8ddd8b497a1 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -686,11 +686,11 @@ decl_event! { ::AccountId, ::Hash { - /// Contract deployed by address at the specified address. [owner, contract] + /// Contract deployed by address at the specified address. \[owner, contract\] Instantiated(AccountId, AccountId), /// Contract has been evicted and is now in tombstone state. - /// [contract, tombstone] + /// \[contract, tombstone\] /// /// # Params /// @@ -699,7 +699,7 @@ decl_event! { Evicted(AccountId, bool), /// Restoration for a contract has been successful. - /// [donor, dest, code_hash, rent_allowance] + /// \[donor, dest, code_hash, rent_allowance\] /// /// # Params /// @@ -710,14 +710,14 @@ decl_event! { Restored(AccountId, AccountId, Hash, Balance), /// Code with the specified hash has been stored. - /// [code_hash] + /// \[code_hash\] CodeStored(Hash), - /// Triggered when the current [schedule] is updated. + /// Triggered when the current \[schedule\] is updated. ScheduleUpdated(u32), /// An event deposited upon execution of a contract from the account. 
- /// [account, data] + /// \[account, data\] ContractExecution(AccountId, Vec), } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index bd1242ff6701a6ed8b9bb854756f86db36971e34..83a680396f41742aaceb10e9cecbee771cfd61f5 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -137,13 +137,14 @@ impl frame_system::Trait for Test { type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u64; type Event = MetaEvent; type DustRemoval = (); diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index 3d67b4d0ec5786ce729217e990366f2fffc9ef10..44639a2275644d70d076f0f265fee66e1031eb9c 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-democracy" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for democracy" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,20 +15,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -pallet-scheduler = { version = "2.0.0-rc5", path = "../scheduler" } -sp-storage = { version = "2.0.0-rc5", path = "../../primitives/storage" } -substrate-test-utils = { version = "2.0.0-rc5", path = "../../test-utils" } -hex-literal = "0.2.1" +sp-core = { version = "2.0.0", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-scheduler = { version = "2.0.0", path = "../scheduler" } +sp-storage = { version = "2.0.0", path = "../../primitives/storage" } +substrate-test-utils = { version = "2.0.0", path = "../../test-utils" 
} +hex-literal = "0.3.1" [features] default = ["std"] diff --git a/frame/democracy/README.md b/frame/democracy/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ffbf2f36a176006a9308cd9fe295da0a89fff62a --- /dev/null +++ b/frame/democracy/README.md @@ -0,0 +1,135 @@ +# Democracy Pallet + +- [`democracy::Trait`](https://docs.rs/pallet-democracy/latest/pallet_democracy/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-democracy/latest/pallet_democracy/enum.Call.html) + +## Overview + +The Democracy pallet handles the administration of general stakeholder voting. + +There are two different queues that a proposal can be added to before it +becomes a referendum, 1) the proposal queue consisting of all public proposals +and 2) the external queue consisting of a single proposal that originates +from one of the _external_ origins (such as a collective group). + +Every launch period - a length defined in the runtime - the Democracy pallet +launches a referendum from a proposal that it takes from either the proposal +queue or the external queue in turn. Any token holder in the system can vote +on referenda. The voting system +uses time-lock voting by allowing the token holder to set their _conviction_ +behind a vote. The conviction will dictate the length of time the tokens +will be locked, as well as the multiplier that scales the vote power. + +### Terminology + +- **Enactment Period:** The minimum period of locking and the period between a proposal being +approved and enacted. +- **Lock Period:** A period of time after proposal enactment that the tokens of _winning_ voters +will be locked. +- **Conviction:** An indication of a voter's strength of belief in their vote. An increase +of one in conviction indicates that a token holder is willing to lock their tokens for twice +as many lock periods after enactment. +- **Vote:** A value that can either be in approval ("Aye") or rejection ("Nay") + of a particular referendum. +- **Proposal:** A submission to the chain that represents an action that a proposer (either an +account or an external origin) suggests that the system adopt. +- **Referendum:** A proposal that is in the process of being voted on for + either acceptance or rejection as a change to the system. +- **Delegation:** The act of granting your voting power to the decisions of another account for + up to a certain conviction. + +### Adaptive Quorum Biasing + +A _referendum_ can be either simple majority-carries in which 50%+1 of the +votes decide the outcome or _adaptive quorum biased_. Adaptive quorum biasing +makes the threshold for passing or rejecting a referendum higher or lower +depending on how the referendum was originally proposed. There are two types of +adaptive quorum biasing: 1) _positive turnout bias_ makes a referendum +require a super-majority to pass that decreases as turnout increases and +2) _negative turnout bias_ makes a referendum require a super-majority to +reject that decreases as turnout increases. Another way to think about the +quorum biasing is that _positive bias_ referendums will be rejected by +default and _negative bias_ referendums get passed by default. + +## Interface + +### Dispatchable Functions + +#### Public + +These calls can be made from any externally held account capable of creating +a signed extrinsic. + +Basic actions: +- `propose` - Submits a sensitive action, represented as a hash. Requires a deposit. 
+- `second` - Signals agreement with a proposal, moves it higher on the proposal queue, and + requires a matching deposit to the original. +- `vote` - Votes in a referendum, either the vote is "Aye" to enact the proposal or "Nay" to + keep the status quo. +- `unvote` - Cancel a previous vote, this must be done by the voter before the vote ends. +- `delegate` - Delegates the voting power (tokens * conviction) to another account. +- `undelegate` - Stops the delegation of voting power to another account. + +Administration actions that can be done to any account: +- `reap_vote` - Remove some account's expired votes. +- `unlock` - Redetermine the account's balance lock, potentially making tokens available. + +Preimage actions: +- `note_preimage` - Registers the preimage for an upcoming proposal, requires + a deposit that is returned once the proposal is enacted. +- `note_preimage_operational` - same but provided by `T::OperationalPreimageOrigin`. +- `note_imminent_preimage` - Registers the preimage for an upcoming proposal. + Does not require a deposit, but the proposal must be in the dispatch queue. +- `note_imminent_preimage_operational` - same but provided by `T::OperationalPreimageOrigin`. +- `reap_preimage` - Removes the preimage for an expired proposal. Will only + work under the condition that it's the same account that noted it and + after the voting period, OR it's a different account after the enactment period. + +#### Cancellation Origin + +This call can only be made by the `CancellationOrigin`. + +- `emergency_cancel` - Schedules an emergency cancellation of a referendum. + Can only happen once to a specific referendum. + +#### ExternalOrigin + +This call can only be made by the `ExternalOrigin`. + +- `external_propose` - Schedules a proposal to become a referendum once it is is legal + for an externally proposed referendum. + +#### External Majority Origin + +This call can only be made by the `ExternalMajorityOrigin`. + +- `external_propose_majority` - Schedules a proposal to become a majority-carries + referendum once it is legal for an externally proposed referendum. + +#### External Default Origin + +This call can only be made by the `ExternalDefaultOrigin`. + +- `external_propose_default` - Schedules a proposal to become a negative-turnout-bias + referendum once it is legal for an externally proposed referendum. + +#### Fast Track Origin + +This call can only be made by the `FastTrackOrigin`. + +- `fast_track` - Schedules the current externally proposed proposal that + is "majority-carries" to become a referendum immediately. + +#### Veto Origin + +This call can only be made by the `VetoOrigin`. + +- `veto_external` - Vetoes and blacklists the external proposal hash. + +#### Root + +- `cancel_referendum` - Removes a referendum. +- `cancel_queued` - Cancels a proposal that is queued for enactment. +- `clear_public_proposal` - Removes all public proposals. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index e298b1e4508c2843de3c38009f6cfd68bb6d33c4..9ed732d3234ea570bf69ad5709a3eff8b1fe7939 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -434,41 +434,43 @@ decl_event! { ::Hash, ::BlockNumber, { - /// A motion has been proposed by a public account. [proposal_index, deposit] + /// A motion has been proposed by a public account. \[proposal_index, deposit\] Proposed(PropIndex, Balance), - /// A public proposal has been tabled for referendum vote. 
[proposal_index, deposit, depositors] + /// A public proposal has been tabled for referendum vote. \[proposal_index, deposit, depositors\] Tabled(PropIndex, Balance, Vec), /// An external proposal has been tabled. ExternalTabled, - /// A referendum has begun. [ref_index, threshold] + /// A referendum has begun. \[ref_index, threshold\] Started(ReferendumIndex, VoteThreshold), - /// A proposal has been approved by referendum. [ref_index] + /// A proposal has been approved by referendum. \[ref_index\] Passed(ReferendumIndex), - /// A proposal has been rejected by referendum. [ref_index] + /// A proposal has been rejected by referendum. \[ref_index\] NotPassed(ReferendumIndex), - /// A referendum has been cancelled. [ref_index] + /// A referendum has been cancelled. \[ref_index\] Cancelled(ReferendumIndex), - /// A proposal has been enacted. [ref_index, is_ok] + /// A proposal has been enacted. \[ref_index, is_ok\] Executed(ReferendumIndex, bool), - /// An account has delegated their vote to another account. [who, target] + /// An account has delegated their vote to another account. \[who, target\] Delegated(AccountId, AccountId), - /// An [account] has cancelled a previous delegation operation. + /// An \[account\] has cancelled a previous delegation operation. Undelegated(AccountId), - /// An external proposal has been vetoed. [who, proposal_hash, until] + /// An external proposal has been vetoed. \[who, proposal_hash, until\] Vetoed(AccountId, Hash, BlockNumber), - /// A proposal's preimage was noted, and the deposit taken. [proposal_hash, who, deposit] + /// A proposal's preimage was noted, and the deposit taken. \[proposal_hash, who, deposit\] PreimageNoted(Hash, AccountId, Balance), /// A proposal preimage was removed and used (the deposit was returned). - /// [proposal_hash, provider, deposit] + /// \[proposal_hash, provider, deposit\] PreimageUsed(Hash, AccountId, Balance), - /// A proposal could not be executed because its preimage was invalid. [proposal_hash, ref_index] + /// A proposal could not be executed because its preimage was invalid. + /// \[proposal_hash, ref_index\] PreimageInvalid(Hash, ReferendumIndex), - /// A proposal could not be executed because its preimage was missing. [proposal_hash, ref_index] + /// A proposal could not be executed because its preimage was missing. + /// \[proposal_hash, ref_index\] PreimageMissing(Hash, ReferendumIndex), /// A registered preimage was removed and the deposit collected by the reaper. - /// [proposal_hash, provider, deposit, reaper] + /// \[proposal_hash, provider, deposit, reaper\] PreimageReaped(Hash, AccountId, Balance, AccountId), - /// An [account] has been unlocked successfully. + /// An \[account\] has been unlocked successfully. 
Unlocked(AccountId), } } diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index 13c6a09a04bc15993b59a2056b321595e688f8fd..7f23eb4cca8ade365fcd1647cbd226903d43a07f 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -112,7 +112,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -128,12 +128,14 @@ impl pallet_scheduler::Trait for Test { type Call = Call; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; + type MaxScheduledPerBlock = (); type WeightInfo = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u64; type Event = Event; type DustRemoval = (); diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index 83ac253a8ba82e4a225671dc34c82c28dd3e4f37..8d59cde19255a04cdf53acb0e320c1b14546b60e 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-elections-phragmen" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet based on seq-Phragmén election method." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,19 +15,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-npos-elections = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/npos-elections" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-npos-elections = { version = "2.0.0", default-features = false, path = "../../primitives/npos-elections" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -substrate-test-utils = { version = "2.0.0-rc5", path = "../../test-utils" } +sp-io = { version = "2.0.0", path = "../../primitives/io" } +hex-literal = "0.3.1" +pallet-balances = { version = "2.0.0", path = 
"../balances" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } [features] default = ["std"] diff --git a/frame/elections-phragmen/README.md b/frame/elections-phragmen/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5507d539706328853238de2e86e6ca567adf6dc7 --- /dev/null +++ b/frame/elections-phragmen/README.md @@ -0,0 +1,67 @@ +# Phragmén Election Module. + +An election module based on sequential phragmen. + +### Term and Round + +The election happens in _rounds_: every `N` blocks, all previous members are retired and a new +set is elected (which may or may not have an intersection with the previous set). Each round +lasts for some number of blocks defined by `TermDuration` storage item. The words _term_ and +_round_ can be used interchangeably in this context. + +`TermDuration` might change during a round. This can shorten or extend the length of the round. +The next election round's block number is never stored but rather always checked on the fly. +Based on the current block number and `TermDuration`, the condition `BlockNumber % TermDuration +== 0` being satisfied will always trigger a new election round. + +### Voting + +Voters can vote for any set of the candidates by providing a list of account ids. Invalid votes +(voting for non-candidates) are ignored during election. Yet, a voter _might_ vote for a future +candidate. Voters reserve a bond as they vote. Each vote defines a `value`. This amount is +locked from the account of the voter and indicates the weight of the vote. Voters can update +their votes at any time by calling `vote()` again. This keeps the bond untouched but can +optionally change the locked `value`. After a round, votes are kept and might still be valid for +further rounds. A voter is responsible for calling `remove_voter` once they are done to have +their bond back and remove the lock. + +Voters also report other voters as being defunct to earn their bond. A voter is defunct once all +of the candidates that they have voted for are neither a valid candidate anymore nor a member. +Upon reporting, if the target voter is actually defunct, the reporter will be rewarded by the +voting bond of the target. The target will lose their bond and get removed. If the target is not +defunct, the reporter is slashed and removed. To prevent being reported, voters should manually +submit a `remove_voter()` as soon as they are in the defunct state. + +### Candidacy and Members + +Candidates also reserve a bond as they submit candidacy. A candidate cannot take their candidacy +back. A candidate can end up in one of the below situations: + - **Winner**: A winner is kept as a _member_. They must still have a bond in reserve and they + are automatically counted as a candidate for the next election. + - **Runner-up**: Runners-up are the best candidates immediately after the winners. The number + of runners_up to keep is configurable. Runners-up are used, in order that they are elected, + as replacements when a candidate is kicked by `[remove_member]`, or when an active member + renounces their candidacy. Runners are automatically counted as a candidate for the next + election. + - **Loser**: Any of the candidate who are not a winner are left as losers. A loser might be an + _outgoing member or runner_, meaning that they are an active member who failed to keep their + spot. An outgoing will always lose their bond. + +##### Renouncing candidacy. 
+ +All candidates, elected or not, can renounce their candidacy. A call to [`Module::renounce_candidacy`] +will always cause the candidacy bond to be refunded. + +Note that with the members being the default candidates for the next round and votes persisting +in storage, the election system is entirely stable given no further input. This means that if +the system has a particular set of candidates `C` and voters `V` that lead to a set of members +`M` being elected, as long as `V` and `C` don't remove their candidacy and votes, `M` will keep +being re-elected at the end of each round. + +### Module Information + +- [`election_sp_phragmen::Trait`](https://docs.rs/pallet-elections-phragmen/latest/pallet_elections_phragmen/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-elections-phragmen/latest/pallet_elections_phragmen/enum.Call.html) +- [`Module`](https://docs.rs/pallet-elections-phragmen/latest/pallet_elections_phragmen/struct.Module.html) + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index 6de9ad57e244fa080c41815436be290546885fb4..e7c3719480b707896e538292e840f47cc730542e 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -28,10 +28,18 @@ use crate::Module as Elections; const BALANCE_FACTOR: u32 = 250; const MAX_VOTERS: u32 = 500; -const MAX_CANDIDATES: u32 = 100; +const MAX_CANDIDATES: u32 = 200; type Lookup = <::Lookup as StaticLookup>::Source; +macro_rules! whitelist { + ($acc:ident) => { + frame_benchmarking::benchmarking::add_to_whitelist( + frame_system::Account::::hashed_key_for(&$acc).into() + ); + }; +} + /// grab new account with infinite balance. fn endowed_account(name: &'static str, index: u32) -> T::AccountId { let account: T::AccountId = account(name, index, 0); @@ -40,6 +48,7 @@ fn endowed_account(name: &'static str, index: u32) -> T::AccountId { // important to increase the total issuance since T::CurrencyToVote will need it to be sane for // phragmen to work. T::Currency::issue(amount); + account } @@ -102,10 +111,9 @@ fn submit_candidates_with_self_vote(c: u32, prefix: &'static str) /// Submit one voter. fn submit_voter(caller: T::AccountId, votes: Vec, stake: BalanceOf) - -> Result<(), &'static str> + -> Result<(), sp_runtime::DispatchError> { >::vote(RawOrigin::Signed(caller).into(), votes, stake) - .map_err(|_| "failed to submit vote") } /// create `num_voter` voters who randomly vote for at most `votes` of `all_candidates` if @@ -131,7 +139,7 @@ fn distribute_voters(mut all_candidates: Vec, num_voters /// Fill the seats of members and runners-up up until `m`. Note that this might include either only /// members, or members and runners-up. fn fill_seats_up_to(m: u32) -> Result, &'static str> { - let candidates = submit_candidates_with_self_vote::(m, "fill_seats_up_to")?; + let _ = submit_candidates_with_self_vote::(m, "fill_seats_up_to")?; assert_eq!(>::candidates().len() as u32, m, "wrong number of candidates."); >::do_phragmen(); assert_eq!(>::candidates().len(), 0, "some candidates remaining."); @@ -140,7 +148,13 @@ fn fill_seats_up_to(m: u32) -> Result, &'static str> m as usize, "wrong number of members and runners-up", ); - Ok(candidates) + Ok( + >::members() + .into_iter() + .map(|(x, _)| x) + .chain(>::runners_up().into_iter().map(|(x, _)| x)) + .collect() + ) } /// removes all the storage items to reverse any genesis state. @@ -152,50 +166,46 @@ fn clean() { } benchmarks! 
{ - _ { - // User account seed - let u in 0 .. 1000 => (); - } + _ {} // -- Signed ones vote { - let u in ...; - // we fix the number of voted candidates to max - let v = MAXIMUM_VOTE; + let v in 1 .. (MAXIMUM_VOTE as u32); clean::(); // create a bunch of candidates. - let all_candidates = submit_candidates::(MAXIMUM_VOTE as u32, "candidates")?; + let all_candidates = submit_candidates::(v, "candidates")?; - let caller = endowed_account::("caller", u); + let caller = endowed_account::("caller", 0); let stake = default_stake::(BALANCE_FACTOR); // vote for all of them. - let votes = all_candidates.into_iter().take(v).collect(); + let votes = all_candidates; + whitelist!(caller); }: _(RawOrigin::Signed(caller), votes, stake) vote_update { - let u in ...; - // we fix the number of voted candidates to max - let v = MAXIMUM_VOTE; + let v in 1 .. (MAXIMUM_VOTE as u32); clean::(); // create a bunch of candidates. - let all_candidates = submit_candidates::(MAXIMUM_VOTE as u32, "candidates")?; + let all_candidates = submit_candidates::(v, "candidates")?; - let caller = endowed_account::("caller", u); + let caller = endowed_account::("caller", 0); let stake = default_stake::(BALANCE_FACTOR); // original votes. - let mut votes = all_candidates.into_iter().take(v).collect::>(); + let mut votes = all_candidates; submit_voter::(caller.clone(), votes.clone(), stake)?; + // new votes. votes.rotate_left(1); + + whitelist!(caller); }: vote(RawOrigin::Signed(caller), votes, stake) remove_voter { - let u in ...; // we fix the number of voted candidates to max let v = MAXIMUM_VOTE as u32; clean::(); @@ -203,11 +213,12 @@ benchmarks! { // create a bunch of candidates. let all_candidates = submit_candidates::(v, "candidates")?; - let caller = endowed_account::("caller", u); + let caller = endowed_account::("caller", 0); let stake = default_stake::(BALANCE_FACTOR); submit_voter::(caller.clone(), all_candidates, stake)?; + whitelist!(caller); }: _(RawOrigin::Signed(caller)) report_defunct_voter_correct { @@ -217,11 +228,11 @@ benchmarks! { // number of candidates that the reported voter voted for. The worse case of search here is // basically `c * v`. let v in 1 .. (MAXIMUM_VOTE as u32); - // we fix the number of members to when members and runners-up to the desired. We'll be in + // we fix the number of members to the number of desired members and runners-up. We'll be in // this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); - clean::(); + clean::(); let stake = default_stake::(BALANCE_FACTOR); // create m members and runners combined. @@ -231,8 +242,7 @@ benchmarks! { let bailing_candidates = submit_candidates::(v, "bailing_candidates")?; let all_candidates = submit_candidates::(c, "all_candidates")?; - // account 1 is the reporter and it doesn't matter how many it votes. But it has to be a - // voter. + // account 1 is the reporter and must be whitelisted, and a voter. let account_1 = endowed_account::("caller", 0); submit_voter::( account_1.clone(), @@ -248,7 +258,9 @@ benchmarks! { stake, )?; - // all the bailers go away. + // all the bailers go away. NOTE: we can simplify this. There's no need to create all these + // candidates and remove them. The defunct voter can just vote for random accounts as long + // as there are enough members (potential candidates). bailing_candidates.into_iter().for_each(|b| { let count = candidate_count::(); assert!(>::renounce_candidacy( @@ -256,10 +268,13 @@ benchmarks! 
{ Renouncing::Candidate(count), ).is_ok()); }); - let defunct = defunct_for::(account_2.clone()); - }: report_defunct_voter(RawOrigin::Signed(account_1.clone()), defunct) + + let defunct_info = defunct_for::(account_2.clone()); + whitelist!(account_1); + + assert!(>::is_voter(&account_2)); + }: report_defunct_voter(RawOrigin::Signed(account_1.clone()), defunct_info) verify { - assert!(>::is_voter(&account_1)); assert!(!>::is_voter(&account_2)); #[cfg(test)] { @@ -276,7 +291,7 @@ benchmarks! { // number of candidates that the reported voter voted for. The worse case of search here is // basically `c * v`. let v in 1 .. (MAXIMUM_VOTE as u32); - // we fix the number of members to when members and runners-up to the desired. We'll be in + // we fix the number of members to the number of desired members and runners-up. We'll be in // this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); @@ -289,7 +304,7 @@ benchmarks! { // create a bunch of candidates as well. let all_candidates = submit_candidates::(c, "candidates")?; - // account 1 is the reporter and it doesn't matter how many it votes. + // account 1 is the reporter and need to be whitelisted, and a voter. let account_1 = endowed_account::("caller", 0); submit_voter::( account_1.clone(), @@ -299,8 +314,9 @@ benchmarks! { // account 2 votes for a bunch of crap, and finally a correct candidate. let account_2 = endowed_account::("caller_2", 1); - let mut invalid: Vec = - (0..(v-1)).map(|seed| account::("invalid", 0, seed).clone()).collect(); + let mut invalid: Vec = (0..(v-1)) + .map(|seed| account::("invalid", 0, seed).clone()) + .collect(); invalid.push(all_candidates.last().unwrap().clone()); submit_voter::( account_2.clone(), @@ -308,11 +324,11 @@ benchmarks! { stake, )?; - let defunct = defunct_for::(account_2.clone()); - // no one bails out. account_1 is slashed and removed as voter now. - }: report_defunct_voter(RawOrigin::Signed(account_1.clone()), defunct) + let defunct_info = defunct_for::(account_2.clone()); + whitelist!(account_1); + }: report_defunct_voter(RawOrigin::Signed(account_1.clone()), defunct_info) verify { - assert!(!>::is_voter(&account_1)); + // account 2 is still a voter. assert!(>::is_voter(&account_2)); #[cfg(test)] { @@ -325,7 +341,7 @@ benchmarks! { submit_candidacy { // number of already existing candidates. let c in 1 .. MAX_CANDIDATES; - // we fix the number of members to when members and runners-up to the desired. We'll be in + // we fix the number of members to the number of desired members and runners-up. We'll be in // this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); @@ -340,6 +356,7 @@ benchmarks! { // we assume worse case that: extrinsic is successful and candidate is not duplicate. let candidate_account = endowed_account::("caller", 0); + whitelist!(candidate_account); }: _(RawOrigin::Signed(candidate_account.clone()), candidate_count::()) verify { #[cfg(test)] @@ -355,7 +372,7 @@ benchmarks! { // limited by the runtime bound, nonetheless we fill them by `m`. // number of already existing candidates. let c in 1 .. MAX_CANDIDATES; - // we fix the number of members to when members and runners-up to the desired. We'll be in + // we fix the number of members to the number of desired members and runners-up. We'll be in // this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); @@ -367,6 +384,7 @@ benchmarks! 
{ let bailing = all_candidates[0].clone(); // Should be ("caller", 0) let count = candidate_count::(); + whitelist!(bailing); }: renounce_candidacy(RawOrigin::Signed(bailing), Renouncing::Candidate(count)) verify { #[cfg(test)] @@ -377,11 +395,10 @@ benchmarks! { } } - renounce_candidacy_member_runner_up { + renounce_candidacy_members { // removing members and runners will be cheaper than a candidate. // we fix the number of members to when members and runners-up to the desired. We'll be in // this state almost always. - let u in ...; let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); clean::(); @@ -389,14 +406,34 @@ benchmarks! { let members_and_runners_up = fill_seats_up_to::(m)?; let bailing = members_and_runners_up[0].clone(); - let renouncing = if >::is_member(&bailing) { - Renouncing::Member - } else if >::is_runner_up(&bailing) { - Renouncing::RunnerUp - } else { - panic!("Bailing must be a member or runner-up for this bench to be sane."); - }; - }: renounce_candidacy(RawOrigin::Signed(bailing.clone()), renouncing) + assert!(>::is_member(&bailing)); + + whitelist!(bailing); + }: renounce_candidacy(RawOrigin::Signed(bailing.clone()), Renouncing::Member) + verify { + #[cfg(test)] + { + // reset members in between benchmark tests. + use crate::tests::MEMBERS; + MEMBERS.with(|m| *m.borrow_mut() = vec![]); + } + } + + renounce_candidacy_runners_up { + // removing members and runners will be cheaper than a candidate. + // we fix the number of members to when members and runners-up to the desired. We'll be in + // this state almost always. + let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); + clean::(); + + // create m members and runners combined. + let members_and_runners_up = fill_seats_up_to::(m)?; + + let bailing = members_and_runners_up[T::DesiredMembers::get() as usize + 1].clone(); + assert!(>::is_runner_up(&bailing)); + + whitelist!(bailing); + }: renounce_candidacy(RawOrigin::Signed(bailing.clone()), Renouncing::RunnerUp) verify { #[cfg(test)] { @@ -407,6 +444,7 @@ benchmarks! { } // -- Root ones + #[extra] // this calls into phragmen and consumes a full block for now. remove_member_without_replacement { // worse case is when we remove a member and we have no runner as a replacement. This // triggers phragmen again. The only parameter is how many candidates will compete for the @@ -440,7 +478,6 @@ benchmarks! { remove_member_with_replacement { // easy case. We have a runner up. Nothing will have that much of an impact. m will be // number of members and runners. There is always at least one runner. - let u in ...; let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); clean::(); @@ -461,7 +498,6 @@ benchmarks! { remove_member_wrong_refund { // The root call by mistake indicated that this will have no replacement, while it has! // this has now consumed a lot of weight and need to refund. - let u in ...; let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); clean::(); @@ -484,6 +520,7 @@ benchmarks! { } } + #[extra] on_initialize { // if n % TermDuration is zero, then we run phragmen. The weight function must and should // check this as it is cheap to do so. TermDuration is not a storage item, it is a constant @@ -514,6 +551,7 @@ benchmarks! { } } + #[extra] phragmen { // This is just to focus on phragmen in the context of this module. We always select 20 // members, this is hard-coded in the runtime and cannot be trivially changed at this stage. 
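For orientation before the next hunks: the hand-written weight formulas being deleted from `lib.rs` are replaced by a `WeightInfo` trait plus the auto-generated `default_weights.rs` that follows below. A minimal sketch of that pattern, assuming a hypothetical pallet with a single `vote` dispatchable and reusing the constants from the generated file (this is not the full trait):

```rust
use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight};

/// Hypothetical, trimmed-down weight-info trait: one function per
/// dispatchable, parameterised by the benchmarked component
/// (here `v`, the number of votes).
pub trait WeightInfo {
    fn vote(v: u32) -> Weight;
}

/// Benchmark-generated style implementation: a base weight, a per-vote
/// term, and the database reads/writes tracked during the benchmark run.
impl WeightInfo for () {
    fn vote(v: u32) -> Weight {
        (91_489_000 as Weight)
            .saturating_add((199_000 as Weight).saturating_mul(v as Weight))
            .saturating_add(DbWeight::get().reads(5 as Weight))
            .saturating_add(DbWeight::get().writes(2 as Weight))
    }
}
```

The dispatchable then declares `#[weight = T::WeightInfo::vote(votes.len() as u32)]`, as the `lib.rs` hunk further down does, so a runtime can either keep this generated default or plug in weights benchmarked on its own hardware.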
@@ -578,7 +616,11 @@ mod tests { }); ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_renounce_candidacy_member_runner_up::()); + assert_ok!(test_benchmark_renounce_candidacy_runners_up::()); + }); + + ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { + assert_ok!(test_benchmark_renounce_candidacy_members::()); }); ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { diff --git a/frame/elections-phragmen/src/default_weights.rs b/frame/elections-phragmen/src/default_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..4025e61d15af4ad9384865ab7e4ddfbb90d80feb --- /dev/null +++ b/frame/elections-phragmen/src/default_weights.rs @@ -0,0 +1,88 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn vote(v: u32, ) -> Weight { + (91_489_000 as Weight) + .saturating_add((199_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().reads(5 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn vote_update(v: u32, ) -> Weight { + (56_511_000 as Weight) + .saturating_add((245_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().reads(5 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn remove_voter() -> Weight { + (76_714_000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn report_defunct_voter_correct(c: u32, v: u32, ) -> Weight { + (0 as Weight) + .saturating_add((1_743_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((31_750_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().reads(7 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn report_defunct_voter_incorrect(c: u32, v: u32, ) -> Weight { + (0 as Weight) + .saturating_add((1_733_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((31_861_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().reads(6 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn submit_candidacy(c: u32, ) -> Weight { + (74_714_000 as Weight) + .saturating_add((315_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn renounce_candidacy_candidate(c: u32, ) -> Weight { + (50_408_000 as Weight) + .saturating_add((159_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + 
.saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn renounce_candidacy_members() -> Weight { + (79_626_000 as Weight) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(4 as Weight)) + } + fn renounce_candidacy_runners_up() -> Weight { + (49_715_000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_member_with_replacement() -> Weight { + (76_572_000 as Weight) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(5 as Weight)) + } + fn remove_member_wrong_refund() -> Weight { + (8_777_000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + } +} diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 50c5de9bc0de46e4599f55eeebee846b0b970b4b..dd816033ae64db8bfb1d9ba69c33973d78d9bc99 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -87,16 +87,16 @@ use codec::{Encode, Decode}; use sp_std::prelude::*; use sp_runtime::{ DispatchError, RuntimeDebug, Perbill, - traits::{Zero, StaticLookup, Convert}, + traits::{Zero, StaticLookup, Convert, Saturating}, }; use frame_support::{ decl_storage, decl_event, ensure, decl_module, decl_error, - weights::{Weight, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}}, + weights::Weight, storage::{StorageMap, IterableStorageMap}, dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}, traits::{ Currency, Get, LockableCurrency, LockIdentifier, ReservableCurrency, WithdrawReasons, - ChangeMembers, OnUnbalanced, WithdrawReason, Contains, BalanceStatus, InitializeMembers, + ChangeMembers, OnUnbalanced, WithdrawReason, Contains, InitializeMembers, BalanceStatus, ContainsLengthBound, } }; @@ -104,6 +104,7 @@ use sp_npos_elections::{build_support_map, ExtendedBalance, VoteWeight, Election use frame_system::{ensure_signed, ensure_root}; mod benchmarking; +mod default_weights; /// The maximum votes allowed per voter. 
pub const MAXIMUM_VOTE: usize = 16; @@ -138,35 +139,17 @@ pub struct DefunctVoter { } pub trait WeightInfo { - fn vote(u: u32, ) -> Weight; - fn vote_update(u: u32, ) -> Weight; - fn remove_voter(u: u32, ) -> Weight; + fn vote(v: u32, ) -> Weight; + fn vote_update(v: u32, ) -> Weight; + fn remove_voter() -> Weight; fn report_defunct_voter_correct(c: u32, v: u32, ) -> Weight; fn report_defunct_voter_incorrect(c: u32, v: u32, ) -> Weight; fn submit_candidacy(c: u32, ) -> Weight; fn renounce_candidacy_candidate(c: u32, ) -> Weight; - fn renounce_candidacy_member_runner_up(u: u32, ) -> Weight; - fn remove_member_without_replacement(c: u32, ) -> Weight; - fn remove_member_with_replacement(u: u32, ) -> Weight; - fn remove_member_wrong_refund(u: u32, ) -> Weight; - fn on_initialize(c: u32, ) -> Weight; - fn phragmen(c: u32, v: u32, e: u32, ) -> Weight; -} - -impl WeightInfo for () { - fn vote(_u: u32, ) -> Weight { 1_000_000_000 } - fn vote_update(_u: u32, ) -> Weight { 1_000_000_000 } - fn remove_voter(_u: u32, ) -> Weight { 1_000_000_000 } - fn report_defunct_voter_correct(_c: u32, _v: u32, ) -> Weight { 1_000_000_000 } - fn report_defunct_voter_incorrect(_c: u32, _v: u32, ) -> Weight { 1_000_000_000 } - fn submit_candidacy(_c: u32, ) -> Weight { 1_000_000_000 } - fn renounce_candidacy_candidate(_c: u32, ) -> Weight { 1_000_000_000 } - fn renounce_candidacy_member_runner_up(_u: u32, ) -> Weight { 1_000_000_000 } - fn remove_member_without_replacement(_c: u32, ) -> Weight { 1_000_000_000 } - fn remove_member_with_replacement(_u: u32, ) -> Weight { 1_000_000_000 } - fn remove_member_wrong_refund(_u: u32, ) -> Weight { 1_000_000_000 } - fn on_initialize(_c: u32, ) -> Weight { 1_000_000_000 } - fn phragmen(_c: u32, _v: u32, _e: u32, ) -> Weight { 1_000_000_000 } + fn renounce_candidacy_members() -> Weight; + fn renounce_candidacy_runners_up() -> Weight; + fn remove_member_with_replacement() -> Weight; + fn remove_member_wrong_refund() -> Weight; } pub trait Trait: frame_system::Trait { @@ -350,13 +333,14 @@ decl_module! { /// State reads: /// - Candidates.len() + Members.len() + RunnersUp.len() /// - Voting (is_voter) + /// - Lock /// - [AccountBalance(who) (unreserve + total_balance)] /// State writes: /// - Voting /// - Lock /// - [AccountBalance(who) (unreserve -- only when creating a new voter)] /// # - #[weight = 50 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(4, 2)] + #[weight = T::WeightInfo::vote(votes.len() as u32)] fn vote( origin, votes: Vec, @@ -412,7 +396,7 @@ decl_module! { /// - Locks /// - [AccountData(who)] /// # - #[weight = 35 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(1, 2)] + #[weight = T::WeightInfo::remove_voter()] fn remove_voter(origin) { let who = ensure_signed(origin)?; ensure!(Self::is_voter(&who), Error::::MustBeVoter); @@ -447,15 +431,14 @@ decl_module! { /// - Voting(reporter || target) /// Note: the db access is worse with respect to db, which is when the report is correct. 
/// # - #[weight = - Weight::from(defunct.candidate_count).saturating_mul(2 * WEIGHT_PER_MICROS) - .saturating_add(Weight::from(defunct.vote_count).saturating_mul(19 * WEIGHT_PER_MICROS)) - .saturating_add(T::DbWeight::get().reads_writes(6, 3)) - ] + #[weight = T::WeightInfo::report_defunct_voter_correct( + defunct.candidate_count, + defunct.vote_count, + )] fn report_defunct_voter( origin, defunct: DefunctVoter<::Source>, - ) { + ) -> DispatchResultWithPostInfo { let reporter = ensure_signed(origin)?; let target = T::Lookup::lookup(defunct.who)?; @@ -482,19 +465,25 @@ decl_module! { ); let valid = Self::is_defunct_voter(&votes); - if valid { + let maybe_refund = if valid { // reporter will get the voting bond of the target T::Currency::repatriate_reserved(&target, &reporter, T::VotingBond::get(), BalanceStatus::Free)?; // remove the target. They are defunct. Self::do_remove_voter(&target, false); + None } else { // slash the bond of the reporter. let imbalance = T::Currency::slash_reserved(&reporter, T::VotingBond::get()).0; T::BadReport::on_unbalanced(imbalance); // remove the reporter. Self::do_remove_voter(&reporter, false); - } + Some(T::WeightInfo::report_defunct_voter_incorrect( + defunct.candidate_count, + defunct.vote_count, + )) + }; Self::deposit_event(RawEvent::VoterReported(target, reporter, valid)); + Ok(maybe_refund.into()) } /// Submit oneself for candidacy. @@ -509,7 +498,6 @@ decl_module! { /// Base weight = 33.33 µs /// Complexity of candidate_count: 0.375 µs /// State reads: - /// - Candidates.len() /// - Candidates /// - Members /// - RunnersUp @@ -518,11 +506,7 @@ decl_module! { /// - [AccountBalance(who)] /// - Candidates /// # - #[weight = - (35 * WEIGHT_PER_MICROS) - .saturating_add(Weight::from(*candidate_count).saturating_mul(375 * WEIGHT_PER_NANOS)) - .saturating_add(T::DbWeight::get().reads_writes(4, 1)) - ] + #[weight = T::WeightInfo::submit_candidacy(*candidate_count)] fn submit_candidacy(origin, #[compact] candidate_count: u32) { let who = ensure_signed(origin)?; @@ -582,23 +566,11 @@ decl_module! { /// State writes: /// - RunnersUp (remove_and_replace_member), /// - [AccountData(who) (unreserve)] - /// - /// Weight note: The call into changeMembers need to be accounted for. /// #[weight = match *renouncing { - Renouncing::Candidate(count) => { - (18 * WEIGHT_PER_MICROS) - .saturating_add(Weight::from(count).saturating_mul(235 * WEIGHT_PER_NANOS)) - .saturating_add(T::DbWeight::get().reads_writes(1, 1)) - }, - Renouncing::Member => { - 46 * WEIGHT_PER_MICROS + - T::DbWeight::get().reads_writes(2, 2) - }, - Renouncing::RunnerUp => { - 46 * WEIGHT_PER_MICROS + - T::DbWeight::get().reads_writes(1, 1) - } + Renouncing::Candidate(count) => T::WeightInfo::renounce_candidacy_candidate(count), + Renouncing::Member => T::WeightInfo::renounce_candidacy_members(), + Renouncing::RunnerUp => T::WeightInfo::renounce_candidacy_runners_up(), }] fn renounce_candidacy(origin, renouncing: Renouncing) { let who = ensure_signed(origin)?; @@ -659,7 +631,7 @@ decl_module! { /// Else, since this is a root call and will go into phragmen, we assume full block for now. /// # #[weight = if *has_replacement { - 50 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(3, 2) + T::WeightInfo::remove_member_with_replacement() } else { T::MaximumBlockWeight::get() }] @@ -677,7 +649,7 @@ decl_module! { return Err(Error::::InvalidReplacement.with_weight( // refund. The weight value comes from a benchmark which is special to this. 
// 5.751 µs - 6 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(1, 0) + T::WeightInfo::remove_member_wrong_refund() )); } // else, prediction was correct. @@ -709,21 +681,21 @@ decl_event!( Balance = BalanceOf, ::AccountId, { - /// A new term with [new_members]. This indicates that enough candidates existed to run the + /// A new term with \[new_members\]. This indicates that enough candidates existed to run the /// election, not that enough have has been elected. The inner value must be examined for - /// this purpose. A `NewTerm([])` indicates that some candidates got their bond slashed and + /// this purpose. A `NewTerm(\[\])` indicates that some candidates got their bond slashed and /// none were elected, whilst `EmptyTerm` means that no candidates existed to begin with. NewTerm(Vec<(AccountId, Balance)>), /// No (or not enough) candidates existed for this round. This is different from - /// `NewTerm([])`. See the description of `NewTerm`. + /// `NewTerm(\[\])`. See the description of `NewTerm`. EmptyTerm, - /// A [member] has been removed. This should always be followed by either `NewTerm` ot + /// A \[member\] has been removed. This should always be followed by either `NewTerm` ot /// `EmptyTerm`. MemberKicked(AccountId), - /// A [member] has renounced their candidacy. + /// A \[member\] has renounced their candidacy. MemberRenounced(AccountId), /// A voter was reported with the the report being successful or not. - /// [voter, reporter, success] + /// \[voter, reporter, success\] VoterReported(AccountId, AccountId, bool), } ); @@ -832,7 +804,9 @@ impl Module { /// Reads Members, RunnersUp, Candidates and Voting(who) from database. fn is_defunct_voter(votes: &[T::AccountId]) -> bool { votes.iter().all(|v| - !Self::is_member(v) && !Self::is_runner_up(v) && !Self::is_candidate(v).is_ok() + !Self::is_member(v) && + !Self::is_runner_up(v) && + !Self::is_candidate(v).is_ok() ) } @@ -904,14 +878,20 @@ impl Module { to_votes(Self::locked_stake_of(who)) }; - let voters_and_votes = Voting::::iter() - .map(|(voter, (stake, targets))| { (voter, to_votes(stake), targets) }) + // used for prime election. + let voters_and_stakes = Voting::::iter() + .map(|(voter, (stake, votes))| { (voter, stake, votes) }) + .collect::>(); + // used for phragmen. + let voters_and_votes = voters_and_stakes.iter() + .cloned() + .map(|(voter, stake, votes)| { (voter, to_votes(stake), votes)} ) .collect::>(); let maybe_phragmen_result = sp_npos_elections::seq_phragmen::( num_to_elect, 0, candidates, - voters_and_votes.clone(), + voters_and_votes, ); if let Some(ElectionResult { winners, assignments }) = maybe_phragmen_result { @@ -965,17 +945,26 @@ impl Module { // save the members, sorted based on account id. new_members.sort_by(|i, j| i.0.cmp(&j.0)); - let mut prime_votes: Vec<_> = new_members.iter().map(|c| (&c.0, VoteWeight::zero())).collect(); - for (_, stake, targets) in voters_and_votes.into_iter() { - for (votes, who) in targets.iter() + // Now we select a prime member using a [Borda count](https://en.wikipedia.org/wiki/Borda_count). + // We weigh everyone's vote for that new member by a multiplier based on the order + // of the votes. i.e. the first person a voter votes for gets a 16x multiplier, + // the next person gets a 15x multiplier, an so on... 
(assuming `MAXIMUM_VOTE` = 16) + let mut prime_votes: Vec<_> = new_members.iter().map(|c| (&c.0, BalanceOf::::zero())).collect(); + for (_, stake, votes) in voters_and_stakes.into_iter() { + for (vote_multiplier, who) in votes.iter() .enumerate() - .map(|(votes, who)| ((MAXIMUM_VOTE - votes) as u32, who)) + .map(|(vote_position, who)| ((MAXIMUM_VOTE - vote_position) as u32, who)) { if let Ok(i) = prime_votes.binary_search_by_key(&who, |k| k.0) { - prime_votes[i].1 += stake * votes as VoteWeight; + prime_votes[i].1 = prime_votes[i].1.saturating_add( + stake.saturating_mul(vote_multiplier.into()) + ); } } } + // We then select the new member with the highest weighted stake. In the case of + // a tie, the last person in the list with the tied score is selected. This is + // the person with the "highest" account id based on the sort above. let prime = prime_votes.into_iter().max_by_key(|x| x.1).map(|x| x.0.clone()); // new_members_ids is sorted by account id. @@ -1124,7 +1113,7 @@ mod tests { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -1141,6 +1130,7 @@ mod tests { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = frame_system::Module; + type MaxLocks = (); type WeightInfo = (); } @@ -1382,7 +1372,7 @@ mod tests { assert!( Elections::members().iter().chain( Elections::runners_up().iter() - ).all(|(_, s)| *s != Zero::zero()) + ).all(|(_, s)| *s != u64::zero()) ); } @@ -1437,15 +1427,15 @@ mod tests { assert_eq!(Elections::term_duration(), 5); assert_eq!(Elections::election_rounds(), 0); - assert_eq!(Elections::members(), vec![]); - assert_eq!(Elections::runners_up(), vec![]); + assert!(Elections::members().is_empty()); + assert!(Elections::runners_up().is_empty()); - assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::candidates().is_empty()); assert_eq!(>::decode_len(), None); assert!(Elections::is_candidate(&1).is_err()); - assert_eq!(all_voters(), vec![]); - assert_eq!(votes_of(&1), vec![]); + assert!(all_voters().is_empty()); + assert!(votes_of(&1).is_empty()); }); } @@ -1520,16 +1510,16 @@ mod tests { assert_eq!(Elections::desired_members(), 2); assert_eq!(Elections::election_rounds(), 0); - assert_eq!(Elections::members_ids(), vec![]); - assert_eq!(Elections::runners_up(), vec![]); - assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::members_ids().is_empty()); + assert!(Elections::runners_up().is_empty()); + assert!(Elections::candidates().is_empty()); System::set_block_number(5); Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![]); - assert_eq!(Elections::runners_up(), vec![]); - assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::members_ids().is_empty()); + assert!(Elections::runners_up().is_empty()); + assert!(Elections::candidates().is_empty()); }); } @@ -1572,18 +1562,18 @@ mod tests { assert!(Elections::is_candidate(&2).is_ok()); assert_eq!(Elections::candidates(), vec![1, 2]); - assert_eq!(Elections::members_ids(), vec![]); - assert_eq!(Elections::runners_up(), vec![]); + assert!(Elections::members_ids().is_empty()); + assert!(Elections::runners_up().is_empty()); System::set_block_number(5); Elections::end_block(System::block_number()); assert!(Elections::is_candidate(&1).is_err()); assert!(Elections::is_candidate(&2).is_err()); - 
assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::candidates().is_empty()); - assert_eq!(Elections::members_ids(), vec![]); - assert_eq!(Elections::runners_up(), vec![]); + assert!(Elections::members_ids().is_empty()); + assert!(Elections::runners_up().is_empty()); }); } @@ -1611,8 +1601,8 @@ mod tests { Elections::end_block(System::block_number()); assert_eq!(Elections::members_ids(), vec![5]); - assert_eq!(Elections::runners_up(), vec![]); - assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::runners_up().is_empty()); + assert!(Elections::candidates().is_empty()); assert_noop!( submit_candidacy(Origin::signed(5)), @@ -1726,7 +1716,7 @@ mod tests { Elections::end_block(System::block_number()); assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::candidates().is_empty()); assert_ok!(vote(Origin::signed(3), vec![4, 5], 10)); }); @@ -1749,7 +1739,7 @@ mod tests { Elections::end_block(System::block_number()); assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::candidates().is_empty()); assert_ok!(vote(Origin::signed(3), vec![4, 5], 10)); assert_eq!(PRIME.with(|p| *p.borrow()), Some(4)); @@ -1775,7 +1765,7 @@ mod tests { Elections::end_block(System::block_number()); assert_eq!(Elections::members_ids(), vec![3, 5]); - assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::candidates().is_empty()); assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); }); @@ -1860,7 +1850,7 @@ mod tests { assert_ok!(Elections::remove_voter(Origin::signed(2))); assert_eq_uvec!(all_voters(), vec![3]); - assert_eq!(votes_of(&2), vec![]); + assert!(votes_of(&2).is_empty()); assert_eq!(Elections::locked_stake_of(&2), 0); assert_eq!(balances(&2), (20, 0)); @@ -1882,7 +1872,7 @@ mod tests { assert_ok!(vote(Origin::signed(2), vec![5], 20)); assert_ok!(Elections::remove_voter(Origin::signed(2))); - assert_eq!(all_voters(), vec![]); + assert!(all_voters().is_empty()); assert_noop!(Elections::remove_voter(Origin::signed(2)), Error::::MustBeVoter); }); @@ -1992,7 +1982,7 @@ mod tests { assert_eq!(Elections::members_ids(), vec![4, 5]); assert_eq!(Elections::runners_up_ids(), vec![6]); - assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::candidates().is_empty()); // all of them have a member or runner-up that they voted for. 
assert_eq!(Elections::is_defunct_voter(&votes_of(&5)), false); @@ -2028,7 +2018,7 @@ mod tests { Elections::end_block(System::block_number()); assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::candidates().is_empty()); assert_eq!(balances(&3), (28, 2)); assert_eq!(balances(&5), (45, 5)); @@ -2056,7 +2046,7 @@ mod tests { Elections::end_block(System::block_number()); assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::candidates().is_empty()); assert_eq!(balances(&4), (35, 5)); assert_eq!(balances(&5), (45, 5)); @@ -2097,9 +2087,9 @@ mod tests { Elections::end_block(System::block_number()); assert_eq!(Elections::members(), vec![(3, 30), (5, 20)]); - assert_eq!(Elections::runners_up(), vec![]); + assert!(Elections::runners_up().is_empty()); assert_eq_uvec!(all_voters(), vec![2, 3, 4]); - assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::candidates().is_empty()); assert_eq!(>::decode_len(), None); assert_eq!(Elections::election_rounds(), 1); @@ -2164,9 +2154,9 @@ mod tests { System::set_block_number(5); Elections::end_block(System::block_number()); - assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::candidates().is_empty()); assert_eq!(Elections::election_rounds(), 1); - assert_eq!(Elections::members_ids(), vec![]); + assert!(Elections::members_ids().is_empty()); assert_eq!( System::events().iter().last().unwrap().event, @@ -2277,7 +2267,7 @@ mod tests { System::set_block_number(10); Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![]); + assert!(Elections::members_ids().is_empty()); assert_eq!(balances(&5), (47, 0)); }); @@ -2362,7 +2352,7 @@ mod tests { assert_eq!(Elections::members(), vec![(4, 40), (5, 50)]); assert_eq!(Elections::runners_up(), vec![(2, 20), (3, 30)]); // no new candidates but old members and runners-up are always added. - assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::candidates().is_empty()); assert_eq!(Elections::election_rounds(), b / 5); assert_eq_uvec!(all_voters(), vec![2, 3, 4, 5]); }; @@ -2418,7 +2408,7 @@ mod tests { assert_err_with_weight!( Elections::remove_member(Origin::root(), 4, true), Error::::InvalidReplacement, - Some(6000000), + Some(33777000), // only thing that matters for now is that it is NOT the full block. ); }); @@ -2440,7 +2430,7 @@ mod tests { assert_err_with_weight!( Elections::remove_member(Origin::root(), 4, false), Error::::InvalidReplacement, - Some(6000000) // only thing that matters for now is that it is NOT the full block. + Some(33777000) // only thing that matters for now is that it is NOT the full block. ); }); } @@ -2474,7 +2464,7 @@ mod tests { // meanwhile, no one cares to become a candidate again. System::set_block_number(10); Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![]); + assert!(Elections::members_ids().is_empty()); assert_eq!(Elections::election_rounds(), 2); }); } @@ -2642,14 +2632,14 @@ mod tests { Elections::end_block(System::block_number()); assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![]); + assert!(Elections::runners_up_ids().is_empty()); assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Member)); assert_eq!(balances(&4), (38, 2)); // 2 is voting bond. 
// no replacement assert_eq!(Elections::members_ids(), vec![5]); - assert_eq!(Elections::runners_up_ids(), vec![]); + assert!(Elections::runners_up_ids().is_empty()); }) } @@ -2713,7 +2703,7 @@ mod tests { assert_ok!(Elections::renounce_candidacy(Origin::signed(5), Renouncing::Candidate(1))); assert_eq!(balances(&5), (50, 0)); - assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::candidates().is_empty()); }) } @@ -2825,7 +2815,7 @@ mod tests { assert_eq!(Elections::members_ids(), vec![1, 4]); assert_eq!(Elections::runners_up_ids(), vec![2, 3]); - assert_eq!(Elections::candidates(), vec![]); + assert!(Elections::candidates().is_empty()); }) } } diff --git a/frame/elections/Cargo.toml b/frame/elections/Cargo.toml index 8226512a6263f05975c1d4ab01ad9ca428756aa7..f0281a3033dd28dd8a1c34f2bafa4cf45897dcd5 100644 --- a/frame/elections/Cargo.toml +++ b/frame/elections/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-elections" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for elections" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,16 +15,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } [dev-dependencies] -hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +hex-literal = "0.3.1" +pallet-balances = { version = "2.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/elections/README.md b/frame/elections/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1f6fd42331c15ef172346f733488d36fa4d0ed80 --- /dev/null +++ b/frame/elections/README.md @@ -0,0 +1,7 @@ +Election module for stake-weighted membership selection of a collective. + +The composition of a set of account IDs works according to one or more approval votes +weighted by stake. There is a partial carry-over facility to give greater weight to those +whose voting is serially unsuccessful. 
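The prime-member selection rewritten in the elections-phragmen hunk above is a Borda-style count: each voter's stake is multiplied by `MAXIMUM_VOTE - position` for the candidate at `position` in their vote list, and ties resolve to the highest account id. A minimal standalone sketch of that weighting, with invented types and names rather than the pallet's actual code:

```rust
const MAXIMUM_VOTE: usize = 16;

/// Pick a prime member from `new_members` (sorted by account id) using a
/// Borda-style weighting of `(voter, stake, ordered votes)` triples.
fn prime_of(
    new_members: &[u64],
    voters_and_stakes: &[(u64, u64, Vec<u64>)],
) -> Option<u64> {
    // Stays sorted by account id because `new_members` is sorted.
    let mut prime_votes: Vec<(u64, u64)> =
        new_members.iter().map(|m| (*m, 0u64)).collect();
    for (_voter, stake, votes) in voters_and_stakes {
        for (position, who) in votes.iter().enumerate() {
            // Earlier votes weigh more: 16x for the first choice, 15x for the
            // second, and so on (votes are assumed bounded by MAXIMUM_VOTE).
            let multiplier = (MAXIMUM_VOTE - position) as u64;
            if let Ok(i) = prime_votes.binary_search_by_key(who, |k| k.0) {
                prime_votes[i].1 = prime_votes[i]
                    .1
                    .saturating_add(stake.saturating_mul(multiplier));
            }
        }
    }
    // `max_by_key` keeps the last maximum, i.e. the highest account id on a tie.
    prime_votes.into_iter().max_by_key(|x| x.1).map(|x| x.0)
}

fn main() {
    // Voter 10 (stake 30) votes [1, 2]; voter 20 (stake 10) votes [2].
    // Candidate 1 scores 30 * 16 = 480; candidate 2 scores 30 * 15 + 10 * 16 = 610.
    let members = vec![1u64, 2];
    let voters = vec![(10u64, 30u64, vec![1u64, 2]), (20, 10, vec![2])];
    assert_eq!(prime_of(&members, &voters), Some(2));
}
```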
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/elections/src/lib.rs b/frame/elections/src/lib.rs index 1453e2f0fd9fcbcf3fda66af76b91905dd2446ff..9b61a9b3509a98b11cf2b7495e796c0c312b9c53 100644 --- a/frame/elections/src/lib.rs +++ b/frame/elections/src/lib.rs @@ -15,6 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! # WARNING: NOT ACTIVELY MAINTAINED +//! +//! This pallet is currently not maintained and should not be used in production until further +//! notice. +//! +//! --- +//! //! Election module for stake-weighted membership selection of a collective. //! //! The composition of a set of account IDs works according to one or more approval votes @@ -700,14 +707,14 @@ decl_module! { decl_event!( pub enum Event where ::AccountId { - /// Reaped [voter, reaper]. + /// Reaped \[voter, reaper\]. VoterReaped(AccountId, AccountId), - /// Slashed [reaper]. + /// Slashed \[reaper\]. BadReaperSlashed(AccountId), - /// A tally (for approval votes of [seats]) has started. + /// A tally (for approval votes of \[seats\]) has started. TallyStarted(u32), /// A tally (for approval votes of seat(s)) has ended (with one or more new members). - /// [incoming, outgoing] + /// \[incoming, outgoing\] TallyFinalized(Vec, Vec), } ); diff --git a/frame/elections/src/mock.rs b/frame/elections/src/mock.rs index c9b2523c4bc8a8dbb705b541bf5bd4e37525dd39..deec77da7b8377c963ef7839c77e85fd3045e40b 100644 --- a/frame/elections/src/mock.rs +++ b/frame/elections/src/mock.rs @@ -59,7 +59,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -70,6 +70,7 @@ parameter_types! { pub const ExistentialDeposit: u64 = 1; } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u64; type DustRemoval = (); type Event = Event; diff --git a/frame/elections/src/tests.rs b/frame/elections/src/tests.rs index 247b6272524b1c64073e4a6157af1b2a5e2bd3a3..92f6e11252b0584ab5cb2f004474a045543fe2c5 100644 --- a/frame/elections/src/tests.rs +++ b/frame/elections/src/tests.rs @@ -46,14 +46,14 @@ fn params_should_work() { assert_eq!(Elections::voters(0), Vec::>::new()); assert_eq!(Elections::voter_info(1), None); - assert_eq!(Elections::all_approvals_of(&1), vec![]); + assert!(Elections::all_approvals_of(&1).is_empty()); }); } #[test] fn chunking_bool_to_flag_should_work() { ExtBuilder::default().build().execute_with(|| { - assert_eq!(Elections::bool_to_flag(vec![]), vec![]); + assert!(Elections::bool_to_flag(vec![]).is_empty()); assert_eq!(Elections::bool_to_flag(vec![false]), vec![0]); assert_eq!(Elections::bool_to_flag(vec![true]), vec![1]); assert_eq!(Elections::bool_to_flag(vec![true, true, true, true]), vec![15]); @@ -274,11 +274,11 @@ fn chunking_approval_storage_should_work() { assert_eq!(Elections::all_approvals_of(&2), vec![true]); // NOTE: these two are stored in mem differently though. 
- assert_eq!(Elections::all_approvals_of(&3), vec![]); - assert_eq!(Elections::all_approvals_of(&4), vec![]); + assert!(Elections::all_approvals_of(&3).is_empty()); + assert!(Elections::all_approvals_of(&4).is_empty()); assert_eq!(Elections::approvals_of((3, 0)), vec![0]); - assert_eq!(Elections::approvals_of((4, 0)), vec![]); + assert!(Elections::approvals_of((4, 0)).is_empty()); }); } @@ -385,7 +385,7 @@ fn voting_locking_stake_and_reserving_bond_works() { assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); assert_eq!(balances(&2), (20, 0)); - assert_eq!(locks(&2), vec![]); + assert!(locks(&2).is_empty()); assert_ok!(Elections::set_approvals(Origin::signed(2), vec![], 0, 0, 15)); assert_eq!(balances(&2), (18, 2)); assert_eq!(locks(&2), vec![15]); @@ -401,7 +401,7 @@ fn voting_locking_stake_and_reserving_bond_works() { assert_ok!(Elections::retract_voter(Origin::signed(2), 0)); assert_eq!(balances(&2), (102, 0)); - assert_eq!(locks(&2), vec![]); + assert!(locks(&2).is_empty()); }); } diff --git a/frame/evm/Cargo.toml b/frame/evm/Cargo.toml index 43ecc6f3688cfa0b9709455300481bcc683d84cb..a228dfb566be28011a8a9bdf84aa150ba07de781 100644 --- a/frame/evm/Cargo.toml +++ b/frame/evm/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-evm" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME EVM contracts pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,15 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../timestamp" } -pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../balances" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -primitive-types = { version = "0.7.0", default-features = false, features = ["rlp"] } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } +pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +primitive-types = { version = "0.7.0", default-features = false, features = ["rlp", "byteorder"] } rlp = { version = "0.4", default-features = false } evm = { version 
= "0.17", default-features = false } sha3 = { version = "0.8", default-features = false } diff --git a/frame/evm/README.md b/frame/evm/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f8feadbf58eb41acd459e26e24841325f54c8783 --- /dev/null +++ b/frame/evm/README.md @@ -0,0 +1,3 @@ +EVM execution module for Substrate + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index 211946bed0e95f4e8e33987d456935e9b05e1fcf..dddb71fc02a74bc2de5a059042e977837d54b14a 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -261,17 +261,17 @@ decl_event! { { /// Ethereum events from contracts. Log(Log), - /// A contract has been created at given [address]. + /// A contract has been created at given \[address\]. Created(H160), - /// A [contract] was attempted to be created, but the execution failed. + /// A \[contract\] was attempted to be created, but the execution failed. CreatedFailed(H160), - /// A [contract] has been executed successfully with states applied. + /// A \[contract\] has been executed successfully with states applied. Executed(H160), - /// A [contract] has been executed with errors. States are reverted with only gas fees applied. + /// A \[contract\] has been executed with errors. States are reverted with only gas fees applied. ExecutedFailed(H160), - /// A deposit has been made at a given address. [sender, address, value] + /// A deposit has been made at a given address. \[sender, address, value\] BalanceDeposit(AccountId, H160, U256), - /// A withdrawal has been made from a given address. [sender, address, value] + /// A withdrawal has been made from a given address. \[sender, address, value\] BalanceWithdraw(AccountId, H160, U256), } } @@ -333,14 +333,14 @@ decl_module! { input, value, gas_limit, - Some(gas_price), + gas_price, nonce, true, )? { - (ExitReason::Succeed(_), _, _) => { + (ExitReason::Succeed(_), _, _, _) => { Module::::deposit_event(Event::::Executed(target)); }, - (_, _, _) => { + (_, _, _, _) => { Module::::deposit_event(Event::::ExecutedFailed(target)); }, } @@ -367,14 +367,14 @@ decl_module! { init, value, gas_limit, - Some(gas_price), + gas_price, nonce, true, )? { - (ExitReason::Succeed(_), create_address, _) => { + (ExitReason::Succeed(_), create_address, _, _) => { Module::::deposit_event(Event::::Created(create_address)); }, - (_, create_address, _) => { + (_, create_address, _, _) => { Module::::deposit_event(Event::::CreatedFailed(create_address)); }, } @@ -402,14 +402,14 @@ decl_module! { salt, value, gas_limit, - Some(gas_price), + gas_price, nonce, true, )? 
{ - (ExitReason::Succeed(_), create_address, _) => { + (ExitReason::Succeed(_), create_address, _, _) => { Module::::deposit_event(Event::::Created(create_address)); }, - (_, create_address, _) => { + (_, create_address, _, _) => { Module::::deposit_event(Event::::CreatedFailed(create_address)); }, } @@ -482,10 +482,10 @@ impl Module { init: Vec, value: U256, gas_limit: u32, - gas_price: Option, + gas_price: U256, nonce: Option, apply_state: bool, - ) -> Result<(ExitReason, H160, U256), Error> { + ) -> Result<(ExitReason, H160, U256, Vec), Error> { Self::execute_evm( source, value, @@ -514,10 +514,10 @@ impl Module { salt: H256, value: U256, gas_limit: u32, - gas_price: Option, + gas_price: U256, nonce: Option, apply_state: bool, - ) -> Result<(ExitReason, H160, U256), Error> { + ) -> Result<(ExitReason, H160, U256, Vec), Error> { let code_hash = H256::from_slice(Keccak256::digest(&init).as_slice()); Self::execute_evm( source, @@ -548,10 +548,10 @@ impl Module { input: Vec, value: U256, gas_limit: u32, - gas_price: Option, + gas_price: U256, nonce: Option, apply_state: bool, - ) -> Result<(ExitReason, Vec, U256), Error> { + ) -> Result<(ExitReason, Vec, U256, Vec), Error> { Self::execute_evm( source, value, @@ -574,20 +574,18 @@ impl Module { source: H160, value: U256, gas_limit: u32, - gas_price: Option, + gas_price: U256, nonce: Option, apply_state: bool, f: F, - ) -> Result<(ExitReason, R, U256), Error> where + ) -> Result<(ExitReason, R, U256, Vec), Error> where F: FnOnce(&mut StackExecutor>) -> (ExitReason, R), { - let gas_price = match gas_price { - Some(gas_price) => { - ensure!(gas_price >= T::FeeCalculator::min_gas_price(), Error::::GasPriceTooLow); - gas_price - }, - None => U256::zero(), - }; + + // Gas price check is skipped when performing a gas estimation. + if apply_state { + ensure!(gas_price >= T::FeeCalculator::min_gas_price(), Error::::GasPriceTooLow); + } let vicinity = Vicinity { gas_price, @@ -629,11 +627,19 @@ impl Module { ); executor.deposit(source, total_fee.saturating_sub(actual_fee)); + let (values, logs) = executor.deconstruct(); + let logs_data = logs.into_iter().map(|x| x ).collect::>(); + let logs_result = logs_data.clone().into_iter().map(|it| { + Log { + address: it.address, + topics: it.topics, + data: it.data + } + }).collect(); if apply_state { - let (values, logs) = executor.deconstruct(); - backend.apply(values, logs, true); + backend.apply(values, logs_data, true); } - Ok((retv, reason, used_gas)) + Ok((retv, reason, used_gas, logs_result)) } } diff --git a/frame/evm/src/tests.rs b/frame/evm/src/tests.rs index 652d6c723b9d337ba42fbd62e3445dee64fa253f..d05fdca1407e5678fbae2d93d0914185b256436e 100644 --- a/frame/evm/src/tests.rs +++ b/frame/evm/src/tests.rs @@ -52,7 +52,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -63,6 +63,7 @@ parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u64; type DustRemoval = (); type Event = (); diff --git a/frame/example-offchain-worker/Cargo.toml b/frame/example-offchain-worker/Cargo.toml index 5f11bd54a45fc379e0d36ba36ed17a39edac93aa..8478ddec29053b60c4c8c89017067788c8ddfdca 100644 --- a/frame/example-offchain-worker/Cargo.toml +++ b/frame/example-offchain-worker/Cargo.toml @@ -1,25 +1,26 @@ [package] name = "pallet-example-offchain-worker" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME example pallet for offchain worker" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } serde = { version = "1.0.101", optional = true } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } lite-json = { version = "0.1", default-features = false } [features] diff --git a/frame/example-offchain-worker/README.md b/frame/example-offchain-worker/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4da1a4c15f8149c227618d05c077a22b4d72a707 --- /dev/null +++ b/frame/example-offchain-worker/README.md @@ -0,0 +1,26 @@ +# Offchain Worker Example Module + +The Offchain Worker Example: A simple pallet demonstrating +concepts, APIs and structures common to most offchain workers. + +Run `cargo doc --package pallet-example-offchain-worker --open` to view this module's +documentation. + +- [`pallet_example_offchain_worker::Trait`](https://docs.rs/pallet-example-offchain-worker/latest/pallet_example_offchain_worker/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-example-offchain-worker/latest/pallet_example_offchain_worker/enum.Call.html) +- [`Module`](https://docs.rs/pallet-example-offchain-worker/latest/pallet_example_offchain_worker/struct.Module.html) + + +## Overview + +In this example we are going to build a very simplistic, naive and definitely NOT +production-ready oracle for BTC/USD price. +Offchain Worker (OCW) will be triggered after every block, fetch the current price +and prepare either signed or unsigned transaction to feed the result back on chain. +The on-chain logic will simply aggregate the results and store last `64` values to compute +the average price. 
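The "store last `64` values" behaviour described above can be pictured with a small standalone sketch; the helper names and the slot chosen for overwriting are illustrative only, not the pallet's actual storage code:

```rust
const MAX_LEN: usize = 64;

/// Record a new price, overwriting an existing slot once the buffer is full.
fn add_price(prices: &mut Vec<u32>, price: u32) {
    if prices.len() < MAX_LEN {
        prices.push(price);
    } else {
        // Recycle a slot; the real pallet may pick the slot differently.
        let idx = price as usize % MAX_LEN;
        prices[idx] = price;
    }
}

/// Average of the currently stored prices, if any.
fn average_price(prices: &[u32]) -> Option<u32> {
    if prices.is_empty() {
        None
    } else {
        let sum: u64 = prices.iter().map(|p| *p as u64).sum();
        Some((sum / prices.len() as u64) as u32)
    }
}

fn main() {
    let mut prices = Vec::new();
    for p in [4200u32, 4300, 4400].iter() {
        add_price(&mut prices, *p);
    }
    assert_eq!(average_price(&prices), Some(4300));
}
```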
+Additional logic in OCW is put in place to prevent spamming the network with both signed +and unsigned transactions, and custom `UnsignedValidator` makes sure that there is only +one unsigned transaction floating in the network. + +License: Unlicense \ No newline at end of file diff --git a/frame/example-offchain-worker/src/lib.rs b/frame/example-offchain-worker/src/lib.rs index f6a4a68e3cb3deb0be54913f78209cb6f3f6ae55..8e02a09484ef5d538a3dbdae06179e2b919ad95b 100644 --- a/frame/example-offchain-worker/src/lib.rs +++ b/frame/example-offchain-worker/src/lib.rs @@ -165,8 +165,8 @@ decl_storage! { decl_event!( /// Events generated by the module. pub enum Event where AccountId = ::AccountId { - /// Event generated when new price is accepted to contribute to the average. - /// [price, who] + /// Event generated when new price is accepted to contribute to the average. + /// \[price, who\] NewPrice(u32, AccountId), } ); @@ -461,16 +461,6 @@ impl Module { // Note this call will block until response is received. let price = Self::fetch_price().map_err(|_| "Failed to fetch price")?; - // Received price is wrapped into a call to `submit_price_unsigned` public function of this - // pallet. This means that the transaction, when executed, will simply call that function - // passing `price` as an argument. - let call = Call::submit_price_unsigned(block_number, price); - - // Now let's create a transaction out of this call and submit it to the pool. - // Here we showcase two ways to send an unsigned transaction with a signed payload - SubmitTransaction::>::submit_unsigned_transaction(call.into()) - .map_err(|()| "Unable to submit unsigned transaction.")?; - // -- Sign using any account let (_, result) = Signer::::any_account().send_unsigned_transaction( |account| PricePayload { @@ -500,16 +490,6 @@ impl Module { // Note this call will block until response is received. let price = Self::fetch_price().map_err(|_| "Failed to fetch price")?; - // Received price is wrapped into a call to `submit_price_unsigned` public function of this - // pallet. This means that the transaction, when executed, will simply call that function - // passing `price` as an argument. - let call = Call::submit_price_unsigned(block_number, price); - - // Now let's create a transaction out of this call and submit it to the pool. 
- // Here we showcase two ways to send an unsigned transaction with a signed payload - SubmitTransaction::>::submit_unsigned_transaction(call.into()) - .map_err(|()| "Unable to submit unsigned transaction.")?; - // -- Sign using all accounts let transaction_results = Signer::::all_accounts() .send_unsigned_transaction( diff --git a/frame/example-offchain-worker/src/tests.rs b/frame/example-offchain-worker/src/tests.rs index 4e7e4def2ba6852d8ec64ede669c3cfbd8b1f705..b71329b5ee1b8c803d7666c01c551ea2b3b3853c 100644 --- a/frame/example-offchain-worker/src/tests.rs +++ b/frame/example-offchain-worker/src/tests.rs @@ -74,7 +74,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); diff --git a/frame/example/Cargo.toml b/frame/example/Cargo.toml index 2f7af90d76d2f7220e5209206ced0a5721b23f60..41889ea4828d03b975424cb474148cdc9ff56a7e 100644 --- a/frame/example/Cargo.toml +++ b/frame/example/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-example" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME example pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,17 +15,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../balances" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core", default-features = false } +sp-core = { version = "2.0.0", path = "../../primitives/core", default-features = false } [features] default = ["std"] diff --git a/frame/example/README.md b/frame/example/README.md new file mode 100644 index 0000000000000000000000000000000000000000..05ef4cd4351cf90fd31513fa0591bbbe9ea0d021 --- /dev/null +++ b/frame/example/README.md @@ -0,0 +1,237 @@ +# Example Pallet + + +The Example: A 
simple example of a FRAME pallet demonstrating +concepts, APIs and structures common to most FRAME runtimes. + +Run `cargo doc --package pallet-example --open` to view this pallet's documentation. + +### Documentation Guidelines: + + + +

+- Documentation comments (i.e. `/// comment`) - should accompany pallet functions and be restricted to the pallet interface, not the internals of the pallet implementation. Only state inputs, outputs, and a brief description that mentions whether calling it requires root, but without repeating the source code details. Capitalize the first word of each documentation comment and end it with a full stop. See Generic example of annotating source code with documentation comments, and the short style sketch after this list.
+- Self-documenting code - Try to refactor code to be self-documenting.
+- Code comments - Supplement complex code with a brief explanation, not every line of code.
+- Identifiers - surround by backticks (i.e. `INHERENT_IDENTIFIER`, `InherentType`, `u64`)
+- Usage scenarios - should be simple doctests. The compiler should ensure they stay valid.
+- Extended tutorials - should be moved to external files and referred to.
+- Mandatory - include all of the sections/subsections where MUST is specified.
+- Optional - optionally include sections/subsections where CAN is specified.
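A minimal, hypothetical sketch of the documentation-comment style described in the first item above; the function is invented for illustration and is not part of any pallet:

```rust
/// Double the given `value` and return the result.
///
/// This comment only illustrates the guideline: it states the input and the
/// output, starts with a capital letter, and ends with a full stop.
pub fn double(value: u32) -> u32 {
    value * 2
}

fn main() {
    assert_eq!(double(21), 42);
}
```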
+ +### Documentation Template:
+ +Copy and paste this template from frame/example/src/lib.rs into file +`frame//src/lib.rs` of your own custom pallet and complete it. +

+// Add heading with custom pallet name
+
+\# <INSERT_CUSTOM_PALLET_NAME> Pallet
+
+// Add simple description
+
+// Include the following links that show what trait needs to be implemented to use the pallet
+// and the supported dispatchables that are documented in the Call enum.
+
+- \[`<INSERT_CUSTOM_PALLET_NAME>::Trait`](https://docs.rs/pallet-example/latest/pallet_example/trait.Trait.html)
+- \[`Call`](https://docs.rs/pallet-example/latest/pallet_example/enum.Call.html)
+- \[`Module`](https://docs.rs/pallet-example/latest/pallet_example/struct.Module.html)
+
+\## Overview
+
+
+// Short description of pallet's purpose.
+// Links to Traits that should be implemented.
+// What this pallet is for.
+// What functionality the pallet provides.
+// When to use the pallet (use case examples).
+// How it is used.
+// Inputs it uses and the source of each input.
+// Outputs it produces.
+
+
+
+
+\## Terminology
+
+// Add terminology used in the custom pallet. Include concepts, storage items, or actions that you think
+// deserve to be noted to give context to the rest of the documentation or pallet usage. The author needs to
+// use some judgment about what is included. We don't want a list of every storage item nor types - the user
+// can go to the code for that. For example, "transfer fee" is obvious and should not be included, but
+// "free balance" and "reserved balance" should be noted to give context to the pallet.
+// Please do not link to outside resources. The reference docs should be the ultimate source of truth.
+
+
+
+\## Goals
+
+// Add goals that the custom pallet is designed to achieve.
+
+
+
+\### Scenarios
+
+
+
+\#### 
+
+// Describe requirements prior to interacting with the custom pallet.
+// Describe the process of interacting with the custom pallet for this scenario and public API functions used.
+
+\## Interface
+
+\### Supported Origins
+
+// What origins are used and supported in this pallet (root, signed, none)
+// i.e. root when \`ensure_root\` used
+// i.e. none when \`ensure_none\` used
+// i.e. signed when \`ensure_signed\` used
+
+\`inherent\` 
+
+
+
+
+\### Types
+
+// Type aliases. Include any associated types and where the user would typically define them.
+
+\`ExampleType\` 
+
+
+
+// Reference documentation of aspects such as `storageItems` and `dispatchable` functions should only be
+// included in the https://docs.rs Rustdocs for Substrate and not repeated in the README file.
+
+\### Dispatchable Functions
+
+
+
+// A brief description of dispatchable functions and a link to the rustdoc with their actual documentation.
+
+// MUST have link to Call enum
+// MUST have origin information included in function doc
+// CAN have more info up to the user
+
+\### Public Functions
+
+
+
+// A link to the rustdoc and any notes about usage in the pallet, not for specific functions.
+// For example, in the Balances Pallet: "Note that when using the publicly exposed functions,
+// you (the runtime developer) are responsible for implementing any necessary checks
+// (e.g. that the sender is the signer) before calling a function that will affect storage."
+
+
+
+// It is up to the writer of the respective pallet (with respect to how much information to provide).
+
+\#### Public Inspection functions - Immutable (getters)
+
+// Insert a subheading for each getter function signature
+
+\##### \`example_getter_name()\`
+
+// What it returns
+// Why, when, and how often to call it
+// When it could panic or error
+// When safety issues to consider
+
+\#### Public Mutable functions (changing state)
+
+// Insert a subheading for each setter function signature
+
+\##### \`example_setter_name(origin, parameter_name: T::ExampleType)\`
+
+// What state it changes
+// Why, when, and how often to call it
+// When it could panic or error
+// When safety issues to consider
+// What parameter values are valid and why
+
+\### Storage Items
+
+// Explain any storage items included in this pallet
+
+\### Digest Items
+
+// Explain any digest items included in this pallet
+
+\### Inherent Data
+
+// Explain what inherent data (if any) is defined in the pallet and any other related types
+
+\### Events:
+
+// Insert events for this pallet if any
+
+\### Errors:
+
+// Explain what generates errors
+
+\## Usage
+
+// Insert 2-3 examples of usage and code snippets that show how to
+// use <INSERT_CUSTOM_PALLET_NAME> Pallet in a custom pallet.
+
+\### Prerequisites
+
+// Show how to include necessary imports for <INSERT_CUSTOM_PALLET_NAME> and derive
+// your pallet configuration trait with the `INSERT_CUSTOM_PALLET_NAME` trait.
+
+\```rust
+use <INSERT_CUSTOM_PALLET_NAME>;
+
+pub trait Trait: <INSERT_CUSTOM_PALLET_NAME>::Trait { }
+\```
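As a hypothetical filled-in version of the snippet above, assuming the custom pallet depends on `pallet-balances` (declared in its `Cargo.toml`):

```rust
// The placeholder is replaced by the pallet actually being depended on.
use pallet_balances as balances;

/// Configuration trait of the custom pallet, inheriting the balances trait.
pub trait Trait: balances::Trait {}
```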
+
+\### Simple Code Snippet
+
+// Show a simple example (e.g. how to query a public getter function of <INSERT_CUSTOM_PALLET_NAME>)
+
+\### Example from FRAME
+
+// Show a usage example in an actual runtime
+
+// See:
+// - Substrate TCR https://github.com/parity-samples/substrate-tcr
+// - Substrate Kitties https://shawntabrizi.github.io/substrate-collectables-workshop/#/
+
+\## Genesis Config
+
+
+
+\## Dependencies
+
+// Dependencies on other FRAME pallets and the genesis config should be mentioned,
+// but not the Rust Standard Library.
+// Genesis configuration modifications that may be made to incorporate this pallet
+// Interaction with other pallets
+
+
+
+\## Related Pallets
+
+// Interaction with other pallets in the form of a bullet point list
+
+\## References
+
+
+
+// Links to reference material, if applicable. For example, Phragmen, W3F research, etc.
+// that the implementation is based on.
+

+ +License: Unlicense \ No newline at end of file diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index b41c8196c018fd5b3c7cccb8a441d3b99c0ec897..230aacbc01d5a0ee2b73a630c60b298d4fab046d 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -764,7 +764,7 @@ mod tests { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -774,6 +774,7 @@ mod tests { pub const ExistentialDeposit: u64 = 1; } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u64; type DustRemoval = (); type Event = (); @@ -842,7 +843,7 @@ mod tests { WatchDummy::(PhantomData).validate(&1, &call, &info, 150) .unwrap() .priority, - Bounded::max_value(), + u64::max_value(), ); assert_eq!( WatchDummy::(PhantomData).validate(&1, &call, &info, 250), diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index ea123bd6e7fb8f311cf6281f8446e2b918a2a9a6..3bd8da04e6cf1cd1083dbfd54c5ca43eedc91e36 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -1,42 +1,48 @@ [package] name = "frame-executive" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME executives engine" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/tracing" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-tracing = { version = "2.0.0", default-features = false, path = "../../primitives/tracing" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } [dev-dependencies] -hex-literal = "0.2.1" -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-io ={ version = "2.0.0-rc5", path = "../../primitives/io" } -pallet-indices = { version = "2.0.0-rc5", path = "../indices" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -pallet-transaction-payment = { version = "2.0.0-rc5", path = "../transaction-payment" } -sp-version = { version = "2.0.0-rc5", path = "../../primitives/version" } +hex-literal = "0.3.1" +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-io ={ 
version = "2.0.0", path = "../../primitives/io" } +pallet-indices = { version = "2.0.0", path = "../indices" } +pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-transaction-payment = { version = "2.0.0", path = "../transaction-payment" } +sp-version = { version = "2.0.0", path = "../../primitives/version" } [features] default = ["std"] +with-tracing = [ + "sp-tracing/with-tracing" +] std = [ "codec/std", "frame-support/std", "frame-system/std", "serde", + "sp-core/std", "sp-runtime/std", "sp-tracing/std", "sp-std/std", diff --git a/frame/executive/README.md b/frame/executive/README.md new file mode 100644 index 0000000000000000000000000000000000000000..24b354902e8762737c762e84d25cc6e968773016 --- /dev/null +++ b/frame/executive/README.md @@ -0,0 +1,61 @@ +# Executive Module + +The Executive module acts as the orchestration layer for the runtime. It dispatches incoming +extrinsic calls to the respective modules in the runtime. + +## Overview + +The executive module is not a typical pallet providing functionality around a specific feature. +It is a cross-cutting framework component for the FRAME. It works in conjunction with the +[FRAME System module](https://docs.rs/frame-system/latest/frame_system/) to perform these cross-cutting functions. + +The Executive module provides functions to: + +- Check transaction validity. +- Initialize a block. +- Apply extrinsics. +- Execute a block. +- Finalize a block. +- Start an off-chain worker. + +### Implementations + +The Executive module provides the following implementations: + +- `ExecuteBlock`: Trait that can be used to execute a block. +- `Executive`: Type that can be used to make the FRAME available from the runtime. + +## Usage + +The default Substrate node template declares the [`Executive`](https://docs.rs/frame-executive/latest/frame_executive/struct.Executive.html) type in its library. + +### Example + +`Executive` type declaration from the node template. + +```rust +# +/// Executive: handles dispatch to the various modules. +pub type Executive = executive::Executive; +``` + +### Custom `OnRuntimeUpgrade` logic + +You can add custom logic that should be called in your runtime on a runtime upgrade. This is +done by setting an optional generic parameter. The custom logic will be called before +the on runtime upgrade logic of all modules is called. + +```rust +# +struct CustomOnRuntimeUpgrade; +impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + // Do whatever you want. + 0 + } +} + +pub type Executive = executive::Executive; +``` + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 24dccf8b0b4a41f97e996e2e005d65051ff9a782..7a5f39ccd8f48eaf893b93852aa3dc463c8f1d85 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -207,6 +207,8 @@ where { /// Start the execution of a particular block. pub fn initialize_block(header: &System::Header) { + sp_io::init_tracing(); + sp_tracing::enter_span!(sp_tracing::Level::TRACE, "init_block"); let digests = Self::extract_pre_digest(&header); Self::initialize_block_impl( header.number(), @@ -270,6 +272,7 @@ where } fn initial_checks(block: &Block) { + sp_tracing::enter_span!(sp_tracing::Level::TRACE, "initial_checks"); let header = block.header(); // Check that `parent_hash` is correct. @@ -288,23 +291,28 @@ where /// Actually execute all transitions for `block`. 
pub fn execute_block(block: Block) { - Self::initialize_block(block.header()); + sp_io::init_tracing(); + sp_tracing::within_span! { + sp_tracing::info_span!( "execute_block", ?block); + { + Self::initialize_block(block.header()); - // any initial checks - Self::initial_checks(&block); + // any initial checks + Self::initial_checks(&block); - let signature_batching = sp_runtime::SignatureBatching::start(); + let signature_batching = sp_runtime::SignatureBatching::start(); - // execute extrinsics - let (header, extrinsics) = block.deconstruct(); - Self::execute_extrinsics_with_book_keeping(extrinsics, *header.number()); + // execute extrinsics + let (header, extrinsics) = block.deconstruct(); + Self::execute_extrinsics_with_book_keeping(extrinsics, *header.number()); - if !signature_batching.verify() { - panic!("Signature verification failed."); - } + if !signature_batching.verify() { + panic!("Signature verification failed."); + } - // any final checks - Self::final_checks(&header); + // any final checks + Self::final_checks(&header); + } }; } /// Execute given extrinsics and take care of post-extrinsics book-keeping. @@ -320,6 +328,8 @@ where /// Finalize the block - it is up the caller to ensure that all header fields are valid /// except state-root. pub fn finalize_block() -> System::Header { + sp_io::init_tracing(); + sp_tracing::enter_span!( sp_tracing::Level::TRACE, "finalize_block" ); >::note_finished_extrinsics(); let block_number = >::block_number(); as OnFinalize>::on_finalize(block_number); @@ -335,6 +345,7 @@ where /// This doesn't attempt to validate anything regarding the block, but it builds a list of uxt /// hashes. pub fn apply_extrinsic(uxt: Block::Extrinsic) -> ApplyExtrinsicResult { + sp_io::init_tracing(); let encoded = uxt.encode(); let encoded_len = encoded.len(); Self::apply_extrinsic_with_len(uxt, encoded_len, Some(encoded)) @@ -355,6 +366,10 @@ where encoded_len: usize, to_note: Option>, ) -> ApplyExtrinsicResult { + sp_tracing::enter_span!( + sp_tracing::info_span!("apply_extrinsic", + ext=?sp_core::hexdisplay::HexDisplay::from(&uxt.encode())) + ); // Verify that the signature is good. let xt = uxt.check(&Default::default())?; @@ -377,6 +392,7 @@ where } fn final_checks(header: &System::Header) { + sp_tracing::enter_span!(sp_tracing::Level::TRACE, "final_checks"); // remove temporaries let new_header = >::finalize(); @@ -406,24 +422,32 @@ where source: TransactionSource, uxt: Block::Extrinsic, ) -> TransactionValidity { - use sp_tracing::tracing_span; + sp_io::init_tracing(); + use sp_tracing::{enter_span, within_span}; - sp_tracing::enter_span!("validate_transaction"); + enter_span!{ sp_tracing::Level::TRACE, "validate_transaction" }; - let encoded_len = tracing_span!{ "using_encoded"; uxt.using_encoded(|d| d.len()) }; + let encoded_len = within_span!{ sp_tracing::Level::TRACE, "using_encoded"; + uxt.using_encoded(|d| d.len()) + }; - let xt = tracing_span!{ "check"; uxt.check(&Default::default())? }; + let xt = within_span!{ sp_tracing::Level::TRACE, "check"; + uxt.check(&Default::default()) + }?; - let dispatch_info = tracing_span!{ "dispatch_info"; xt.get_dispatch_info() }; + let dispatch_info = within_span!{ sp_tracing::Level::TRACE, "dispatch_info"; + xt.get_dispatch_info() + }; - tracing_span! { - "validate"; + within_span! { + sp_tracing::Level::TRACE, "validate"; xt.validate::(source, &dispatch_info, encoded_len) } } /// Start an offchain worker and generate extrinsics. 
pub fn offchain_worker(header: &System::Header) { + sp_io::init_tracing(); // We need to keep events available for offchain workers, // hence we initialize the block manually. // OffchainWorker RuntimeApi should skip initialization. @@ -567,7 +591,7 @@ mod tests { type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; type Version = RuntimeVersion; - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -584,6 +608,7 @@ mod tests { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type MaxLocks = (); type WeightInfo = (); } @@ -713,7 +738,7 @@ mod tests { header: Header { parent_hash: [69u8; 32].into(), number: 1, - state_root: hex!("e8ff7b3dd4375f6f3a76e24a1999e2a7be2d15b353e49ac94ace1eae3e80eb87").into(), + state_root: hex!("465a1569d309039bdf84b0479d28064ea29e6584584dc7d788904bb14489c6f6").into(), extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), digest: Digest { logs: vec![], }, }, diff --git a/frame/finality-tracker/Cargo.toml b/frame/finality-tracker/Cargo.toml index 9b54717e4dbb14e27749b3761a1174dc6b2d127a..b8597acc3bc0797e0fca17c73fce48146bc9bbf4 100644 --- a/frame/finality-tracker/Cargo.toml +++ b/frame/finality-tracker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-finality-tracker" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME Pallet that tracks the last finalized block, as perceived by block authors." documentation = "https://docs.rs/pallet-finality-tracker" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,17 +17,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-finality-tracker = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/finality-tracker" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-finality-tracker = { version = "2.0.0", default-features = false, path = "../../primitives/finality-tracker" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = 
"../../primitives/io" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/finality-tracker/README.md b/frame/finality-tracker/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bf42605ffc6f99029e015d40b98d6bada2ab40f8 --- /dev/null +++ b/frame/finality-tracker/README.md @@ -0,0 +1,3 @@ +FRAME Pallet that tracks the last finalized block, as perceived by block authors. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/finality-tracker/src/lib.rs b/frame/finality-tracker/src/lib.rs index 58f16d72766ebbfe2faf018a7470859c4686c7d0..3b38c4c20033f56dd10789abb8e32226266bd724 100644 --- a/frame/finality-tracker/src/lib.rs +++ b/frame/finality-tracker/src/lib.rs @@ -273,7 +273,7 @@ mod tests { type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); diff --git a/frame/generic-asset/Cargo.toml b/frame/generic-asset/Cargo.toml deleted file mode 100644 index e1e5903062796b28c7ac50c51387054b452b3704..0000000000000000000000000000000000000000 --- a/frame/generic-asset/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "pallet-generic-asset" -version = "2.0.0-rc5" -authors = ["Centrality Developers "] -edition = "2018" -license = "Apache-2.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -description = "FRAME pallet for generic asset management" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } - -[dev-dependencies] -sp-io ={ version = "2.0.0-rc5", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } - -[features] -default = ["std"] -std =[ - "serde/std", - "codec/std", - "sp-std/std", - "sp-runtime/std", - "frame-support/std", - "frame-system/std", -] diff --git a/frame/generic-asset/src/lib.rs b/frame/generic-asset/src/lib.rs deleted file mode 100644 index 881d89439ec7b15d84d4c91d537ca571c36ca618..0000000000000000000000000000000000000000 --- a/frame/generic-asset/src/lib.rs +++ /dev/null @@ -1,1367 +0,0 @@ -// Copyright 2019-2020 -// by Centrality Investments Ltd. -// and Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! # Generic Asset Module -//! -//! The Generic Asset module provides functionality for handling accounts and asset balances. -//! -//! ## Overview -//! -//! The Generic Asset module provides functions for: -//! -//! - Creating a new kind of asset. -//! - Setting permissions of an asset. -//! - Getting and setting free balances. -//! - Retrieving total, reserved and unreserved balances. -//! - Repatriating a reserved balance to a beneficiary account. -//! - Transferring a balance between accounts (when not reserved). -//! - Slashing an account balance. -//! - Managing total issuance. -//! - Setting and managing locks. -//! -//! ### Terminology -//! -//! - **Staking Asset:** The asset for staking, to participate as Validators in the network. -//! - **Spending Asset:** The asset for payment, such as paying transfer fees, gas fees, etc. -//! - **Permissions:** A set of rules for a kind of asset, defining the allowed operations to the asset, and which -//! accounts are allowed to possess it. -//! - **Total Issuance:** The total number of units in existence in a system. -//! - **Free Balance:** The portion of a balance that is not reserved. The free balance is the only balance that matters -//! for most operations. When this balance falls below the existential deposit, most functionality of the account is -//! removed. When both it and the reserved balance are deleted, then the account is said to be dead. -//! - **Reserved Balance:** Reserved balance still belongs to the account holder, but is suspended. Reserved balance -//! can still be slashed, but only after all the free balance has been slashed. If the reserved balance falls below the -//! existential deposit then it and any related functionality will be deleted. When both it and the free balance are -//! deleted, then the account is said to be dead. -//! - **Imbalance:** A condition when some assets were credited or debited without equal and opposite accounting -//! (i.e. a difference between total issuance and account balances). Functions that result in an imbalance will -//! return an object of the `Imbalance` trait that can be managed within your runtime logic. (If an imbalance is -//! simply dropped, it should automatically maintain any book-keeping such as total issuance.) -//! - **Lock:** A freeze on a specified amount of an account's free balance until a specified block number. Multiple -//! locks always operate over the same funds, so they "overlay" rather than "stack". -//! -//! ### Implementations -//! -//! The Generic Asset module provides `AssetCurrency`, which implements the following traits. If these traits provide -//! the functionality that you need, you can avoid coupling with the Generic Asset module. -//! -//! - `Currency`: Functions for dealing with a fungible assets system. -//! - `ReservableCurrency`: Functions for dealing with assets that can be reserved from an account. -//! - `LockableCurrency`: Functions for dealing with accounts that allow liquidity restrictions. -//! - `Imbalance`: Functions for handling imbalances between total issuance in the system and account balances. -//! Must be used when a function creates new assets (e.g. a reward) or destroys some assets (e.g. a system fee). -//! -//! The Generic Asset module provides two types of `AssetCurrency` as follows. -//! -//! - `StakingAssetCurrency`: Currency for staking. -//! 
- `SpendingAssetCurrency`: Currency for payments such as transfer fee, gas fee. -//! -//! ## Interface -//! -//! ### Dispatchable Functions -//! -//! - `create`: Create a new kind of asset. -//! - `transfer`: Transfer some liquid free balance to another account. -//! - `update_permission`: Updates permission for a given `asset_id` and an account. The origin of this call -//! must have update permissions. -//! - `mint`: Mint an asset, increases its total issuance. The origin of this call must have mint permissions. -//! - `burn`: Burn an asset, decreases its total issuance. The origin of this call must have burn permissions. -//! - `create_reserved`: Create a new kind of reserved asset. The origin of this call must be root. -//! -//! ### Public Functions -//! -//! - `total_balance`: Get an account's total balance of an asset kind. -//! - `free_balance`: Get an account's free balance of an asset kind. -//! - `reserved_balance`: Get an account's reserved balance of an asset kind. -//! - `create_asset`: Creates an asset. -//! - `make_transfer`: Transfer some liquid free balance from one account to another. -//! This will not emit the `Transferred` event. -//! - `make_transfer_with_event`: Transfer some liquid free balance from one account to another. -//! This will emit the `Transferred` event. -//! - `reserve`: Moves an amount from free balance to reserved balance. -//! - `unreserve`: Move up to an amount from reserved balance to free balance. This function cannot fail. -//! - `mint_free`: Mint to an account's free balance. -//! - `burn_free`: Burn an account's free balance. -//! - `slash`: Deduct up to an amount from the combined balance of `who`, preferring to deduct from the -//! free balance. This function cannot fail. -//! - `slash_reserved`: Deduct up to an amount from reserved balance of an account. This function cannot fail. -//! - `repatriate_reserved`: Move up to an amount from reserved balance of an account to free balance of another -//! account. -//! - `check_permission`: Check permission to perform burn, mint or update. -//! - `ensure_can_withdraw`: Check if the account is able to make a withdrawal of the given amount -//! for the given reason. -//! -//! ### Usage -//! -//! The following examples show how to use the Generic Asset Pallet in your custom pallet. -//! -//! ### Examples from the FRAME pallet -//! -//! The Fees Pallet uses the `Currency` trait to handle fee charge/refund, and its types inherit from `Currency`: -//! -//! ``` -//! use frame_support::{ -//! dispatch, -//! traits::{Currency, ExistenceRequirement, WithdrawReason}, -//! }; -//! # pub trait Trait: frame_system::Trait { -//! # type Currency: Currency; -//! # } -//! type AssetOf = <::Currency as Currency<::AccountId>>::Balance; -//! -//! fn charge_fee(transactor: &T::AccountId, amount: AssetOf) -> dispatch::DispatchResult { -//! // ... -//! T::Currency::withdraw( -//! transactor, -//! amount, -//! WithdrawReason::TransactionPayment.into(), -//! ExistenceRequirement::KeepAlive, -//! )?; -//! // ... -//! Ok(()) -//! } -//! -//! fn refund_fee(transactor: &T::AccountId, amount: AssetOf) -> dispatch::DispatchResult { -//! // ... -//! T::Currency::deposit_into_existing(transactor, amount)?; -//! // ... -//! Ok(()) -//! } -//! -//! # fn main() {} -//! ``` -//! -//! ## Genesis config -//! -//! The Generic Asset Pallet depends on the [`GenesisConfig`](./struct.GenesisConfig.html). 
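For reviewers auditing this removal, the dispatchable and public functions listed above boil down to a small usage pattern. The following is a minimal sketch only, written against the pallet's own mock runtime (`ExtBuilder`, `GenericAsset` and `Origin` from the mock.rs that this same diff deletes); it is illustrative and not part of the change itself:

```rust
// Illustrative sketch: exercising the interface described above via the
// pallet's mock runtime. Assumes the mock genesis used by the deleted tests
// (staking asset 16000, next user asset id 1000).
use super::*;
use crate::mock::{ExtBuilder, GenericAsset, Origin};
use frame_support::assert_ok;

#[test]
fn generic_asset_interface_walkthrough() {
	ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| {
		let permissions = PermissionLatest {
			update: Owner::Address(1),
			mint: Owner::Address(1),
			burn: Owner::Address(1),
		};
		// `create` allocates the next user asset id (1000 in the mock genesis).
		assert_ok!(GenericAsset::create(
			Origin::signed(1),
			AssetOptions { initial_issuance: 100, permissions },
		));
		let asset_id = 1000;

		// `transfer` moves liquid free balance between accounts.
		assert_ok!(GenericAsset::transfer(Origin::signed(1), asset_id, 2, 40));
		assert_eq!(GenericAsset::free_balance(&asset_id, &1), 60);
		assert_eq!(GenericAsset::free_balance(&asset_id, &2), 40);

		// `mint` and `burn` require the permissions granted at creation.
		assert_ok!(GenericAsset::mint(Origin::signed(1), asset_id, 2, 10));
		assert_ok!(GenericAsset::burn(Origin::signed(1), asset_id, 2, 10));

		// `reserve`/`unreserve` move funds between the two balance kinds;
		// `unreserve` returns the portion that could not be unreserved.
		assert_ok!(GenericAsset::reserve(&asset_id, &1, 20));
		assert_eq!(GenericAsset::reserved_balance(&asset_id, &1), 20);
		assert_eq!(GenericAsset::unreserve(&asset_id, &1, 50), 30);
	});
}
```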
- -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Decode, Encode, HasCompact, Input, Output, Error as CodecError}; - -use sp_runtime::{RuntimeDebug, DispatchResult, DispatchError}; -use sp_runtime::traits::{ - CheckedAdd, CheckedSub, MaybeSerializeDeserialize, Member, One, Saturating, AtLeast32Bit, - Zero, Bounded, AtLeast32BitUnsigned -}; - -use sp_std::prelude::*; -use sp_std::{cmp, result, fmt::Debug}; -use frame_support::{ - decl_event, decl_module, decl_storage, ensure, decl_error, - traits::{ - Currency, ExistenceRequirement, Imbalance, LockIdentifier, LockableCurrency, - ReservableCurrency, SignedImbalance, WithdrawReason, WithdrawReasons, TryDrop, - BalanceStatus, - }, - Parameter, StorageMap, -}; -use frame_system::{ensure_signed, ensure_root}; - -mod mock; -mod tests; - -pub use self::imbalances::{NegativeImbalance, PositiveImbalance}; - -pub trait Trait: frame_system::Trait { - type Balance: Parameter + Member + AtLeast32BitUnsigned + Default + Copy + Debug + - MaybeSerializeDeserialize; - type AssetId: Parameter + Member + AtLeast32Bit + Default + Copy; - type Event: From> + Into<::Event>; -} - -pub trait Subtrait: frame_system::Trait { - type Balance: Parameter + Member + AtLeast32BitUnsigned + Default + Copy + Debug + - MaybeSerializeDeserialize; - type AssetId: Parameter + Member + AtLeast32Bit + Default + Copy; -} - -impl Subtrait for T { - type Balance = T::Balance; - type AssetId = T::AssetId; -} - -/// Asset creation options. -#[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug)] -pub struct AssetOptions { - /// Initial issuance of this asset. All deposit to the creator of the asset. - #[codec(compact)] - pub initial_issuance: Balance, - /// Which accounts are allowed to possess this asset. - pub permissions: PermissionLatest, -} - -/// Owner of an asset. -#[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug)] -pub enum Owner { - /// No owner. - None, - /// Owned by an AccountId - Address(AccountId), -} - -impl Default for Owner { - fn default() -> Self { - Owner::None - } -} - -/// Asset permissions -#[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug)] -pub struct PermissionsV1 { - /// Who have permission to update asset permission - pub update: Owner, - /// Who have permission to mint new asset - pub mint: Owner, - /// Who have permission to burn asset - pub burn: Owner, -} - -#[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug)] -#[repr(u8)] -enum PermissionVersionNumber { - V1 = 0, -} - -/// Versioned asset permission -#[derive(Clone, PartialEq, Eq, RuntimeDebug)] -pub enum PermissionVersions { - V1(PermissionsV1), -} - -/// Asset permission types -pub enum PermissionType { - /// Permission to burn asset permission - Burn, - /// Permission to mint new asset - Mint, - /// Permission to update asset - Update, -} - -/// Alias to latest asset permissions -pub type PermissionLatest = PermissionsV1; - -impl Default for PermissionVersions { - fn default() -> Self { - PermissionVersions::V1(Default::default()) - } -} - -impl Encode for PermissionVersions { - fn encode_to(&self, dest: &mut T) { - match self { - PermissionVersions::V1(payload) => { - dest.push(&PermissionVersionNumber::V1); - dest.push(payload); - }, - } - } -} - -impl codec::EncodeLike for PermissionVersions {} - -impl Decode for PermissionVersions { - fn decode(input: &mut I) -> core::result::Result { - let version = PermissionVersionNumber::decode(input)?; - Ok( - match version { - PermissionVersionNumber::V1 => PermissionVersions::V1(Decode::decode(input)?) 
- } - ) - } -} - -impl Default for PermissionsV1 { - fn default() -> Self { - PermissionsV1 { - update: Owner::None, - mint: Owner::None, - burn: Owner::None, - } - } -} - -impl Into> for PermissionVersions { - fn into(self) -> PermissionLatest { - match self { - PermissionVersions::V1(v1) => v1, - } - } -} - -/// Converts the latest permission to other version. -impl Into> for PermissionLatest { - fn into(self) -> PermissionVersions { - PermissionVersions::V1(self) - } -} - -decl_error! { - /// Error for the generic-asset module. - pub enum Error for Module { - /// No new assets id available. - NoIdAvailable, - /// Cannot transfer zero amount. - ZeroAmount, - /// The origin does not have enough permission to update permissions. - NoUpdatePermission, - /// The origin does not have permission to mint an asset. - NoMintPermission, - /// The origin does not have permission to burn an asset. - NoBurnPermission, - /// Total issuance got overflowed after minting. - TotalMintingOverflow, - /// Free balance got overflowed after minting. - FreeMintingOverflow, - /// Total issuance got underflowed after burning. - TotalBurningUnderflow, - /// Free balance got underflowed after burning. - FreeBurningUnderflow, - /// Asset id is already taken. - IdAlreadyTaken, - /// Asset id not available. - IdUnavailable, - /// The balance is too low to send amount. - InsufficientBalance, - /// The account liquidity restrictions prevent withdrawal. - LiquidityRestrictions, - } -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - - /// Create a new kind of asset. - #[weight = 0] - fn create(origin, options: AssetOptions) -> DispatchResult { - let origin = ensure_signed(origin)?; - Self::create_asset(None, Some(origin), options) - } - - /// Transfer some liquid free balance to another account. - #[weight = 0] - pub fn transfer(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, #[compact] amount: T::Balance) { - let origin = ensure_signed(origin)?; - ensure!(!amount.is_zero(), Error::::ZeroAmount); - Self::make_transfer_with_event(&asset_id, &origin, &to, amount)?; - } - - /// Updates permission for a given `asset_id` and an account. - /// - /// The `origin` must have `update` permission. - #[weight = 0] - fn update_permission( - origin, - #[compact] asset_id: T::AssetId, - new_permission: PermissionLatest - ) -> DispatchResult { - let origin = ensure_signed(origin)?; - - let permissions: PermissionVersions = new_permission.into(); - - if Self::check_permission(&asset_id, &origin, &PermissionType::Update) { - >::insert(asset_id, &permissions); - - Self::deposit_event(RawEvent::PermissionUpdated(asset_id, permissions.into())); - - Ok(()) - } else { - Err(Error::::NoUpdatePermission)? - } - } - - /// Mints an asset, increases its total issuance. - /// The origin must have `mint` permissions. - #[weight = 0] - fn mint(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, amount: T::Balance) -> DispatchResult { - let who = ensure_signed(origin)?; - Self::mint_free(&asset_id, &who, &to, &amount)?; - Self::deposit_event(RawEvent::Minted(asset_id, to, amount)); - Ok(()) - } - - /// Burns an asset, decreases its total issuance. - /// The `origin` must have `burn` permissions. 
- #[weight = 0] - fn burn(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, amount: T::Balance) -> DispatchResult { - let who = ensure_signed(origin)?; - Self::burn_free(&asset_id, &who, &to, &amount)?; - Self::deposit_event(RawEvent::Burned(asset_id, to, amount)); - Ok(()) - } - - /// Can be used to create reserved tokens. - /// Requires Root call. - #[weight = 0] - fn create_reserved( - origin, - asset_id: T::AssetId, - options: AssetOptions - ) -> DispatchResult { - ensure_root(origin)?; - Self::create_asset(Some(asset_id), None, options) - } - } -} - -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct BalanceLock { - pub id: LockIdentifier, - pub amount: Balance, - pub reasons: WithdrawReasons, -} - -decl_storage! { - trait Store for Module as GenericAsset { - /// Total issuance of a given asset. - /// - /// TWOX-NOTE: `AssetId` is trusted. - pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig| { - let issuance = config.initial_balance * (config.endowed_accounts.len() as u32).into(); - config.assets.iter().map(|id| (id.clone(), issuance)).collect::>() - }): map hasher(twox_64_concat) T::AssetId => T::Balance; - - /// The free balance of a given asset under an account. - /// - /// TWOX-NOTE: `AssetId` is trusted. - pub FreeBalance: - double_map hasher(twox_64_concat) T::AssetId, hasher(blake2_128_concat) T::AccountId => T::Balance; - - /// The reserved balance of a given asset under an account. - /// - /// TWOX-NOTE: `AssetId` is trusted. - pub ReservedBalance: - double_map hasher(twox_64_concat) T::AssetId, hasher(blake2_128_concat) T::AccountId => T::Balance; - - /// Next available ID for user-created asset. - pub NextAssetId get(fn next_asset_id) config(): T::AssetId; - - /// Permission options for a given asset. - /// - /// TWOX-NOTE: `AssetId` is trusted. - pub Permissions get(fn get_permission): - map hasher(twox_64_concat) T::AssetId => PermissionVersions; - - /// Any liquidity locks on some account balances. - pub Locks get(fn locks): - map hasher(blake2_128_concat) T::AccountId => Vec>; - - /// The identity of the asset which is the one that is designated for the chain's staking system. - pub StakingAssetId get(fn staking_asset_id) config(): T::AssetId; - - /// The identity of the asset which is the one that is designated for paying the chain's transaction fee. - pub SpendingAssetId get(fn spending_asset_id) config(): T::AssetId; - } - add_extra_genesis { - config(assets): Vec; - config(initial_balance): T::Balance; - config(endowed_accounts): Vec; - - build(|config: &GenesisConfig| { - config.assets.iter().for_each(|asset_id| { - config.endowed_accounts.iter().for_each(|account_id| { - >::insert(asset_id, account_id, &config.initial_balance); - }); - }); - }); - } -} - -decl_event!( - pub enum Event where - ::AccountId, - ::Balance, - ::AssetId, - AssetOptions = AssetOptions<::Balance, ::AccountId> - { - /// Asset created. [asset_id, creator, asset_options] - Created(AssetId, AccountId, AssetOptions), - /// Asset transfer succeeded. [asset_id, from, to, amount] - Transferred(AssetId, AccountId, AccountId, Balance), - /// Asset permission updated. [asset_id, new_permissions] - PermissionUpdated(AssetId, PermissionLatest), - /// New asset minted. [asset_id, account, amount] - Minted(AssetId, AccountId, Balance), - /// Asset burned. [asset_id, account, amount] - Burned(AssetId, AccountId, Balance), - } -); - -impl Module { - // PUBLIC IMMUTABLES - - /// Get an account's total balance of an asset kind. 
- pub fn total_balance(asset_id: &T::AssetId, who: &T::AccountId) -> T::Balance { - Self::free_balance(asset_id, who) + Self::reserved_balance(asset_id, who) - } - - /// Get an account's free balance of an asset kind. - pub fn free_balance(asset_id: &T::AssetId, who: &T::AccountId) -> T::Balance { - >::get(asset_id, who) - } - - /// Get an account's reserved balance of an asset kind. - pub fn reserved_balance(asset_id: &T::AssetId, who: &T::AccountId) -> T::Balance { - >::get(asset_id, who) - } - - /// Mint to an account's free balance, without event - pub fn mint_free( - asset_id: &T::AssetId, - who: &T::AccountId, - to: &T::AccountId, - amount: &T::Balance, - ) -> DispatchResult { - if Self::check_permission(asset_id, who, &PermissionType::Mint) { - let original_free_balance = Self::free_balance(&asset_id, &to); - let current_total_issuance = >::get(asset_id); - let new_total_issuance = current_total_issuance.checked_add(&amount) - .ok_or(Error::::TotalMintingOverflow)?; - let value = original_free_balance.checked_add(&amount) - .ok_or(Error::::FreeMintingOverflow)?; - - >::insert(asset_id, new_total_issuance); - Self::set_free_balance(&asset_id, &to, value); - Ok(()) - } else { - Err(Error::::NoMintPermission)? - } - } - - /// Burn an account's free balance, without event - pub fn burn_free( - asset_id: &T::AssetId, - who: &T::AccountId, - to: &T::AccountId, - amount: &T::Balance, - ) -> DispatchResult { - if Self::check_permission(asset_id, who, &PermissionType::Burn) { - let original_free_balance = Self::free_balance(asset_id, to); - - let current_total_issuance = >::get(asset_id); - let new_total_issuance = current_total_issuance.checked_sub(amount) - .ok_or(Error::::TotalBurningUnderflow)?; - let value = original_free_balance.checked_sub(amount) - .ok_or(Error::::FreeBurningUnderflow)?; - - >::insert(asset_id, new_total_issuance); - Self::set_free_balance(asset_id, to, value); - Ok(()) - } else { - Err(Error::::NoBurnPermission)? - } - } - - /// Creates an asset. - /// - /// # Arguments - /// * `asset_id`: An ID of a reserved asset. - /// If not provided, a user-generated asset will be created with the next available ID. - /// * `from_account`: The initiator account of this call - /// * `asset_options`: Asset creation options. - /// - pub fn create_asset( - asset_id: Option, - from_account: Option, - options: AssetOptions, - ) -> DispatchResult { - let asset_id = if let Some(asset_id) = asset_id { - ensure!(!>::contains_key(&asset_id), Error::::IdAlreadyTaken); - ensure!(asset_id < Self::next_asset_id(), Error::::IdUnavailable); - asset_id - } else { - let asset_id = Self::next_asset_id(); - let next_id = asset_id - .checked_add(&One::one()) - .ok_or(Error::::NoIdAvailable)?; - >::put(next_id); - asset_id - }; - - let account_id = from_account.unwrap_or_default(); - let permissions: PermissionVersions = options.permissions.clone().into(); - - >::insert(asset_id, &options.initial_issuance); - >::insert(&asset_id, &account_id, &options.initial_issuance); - >::insert(&asset_id, permissions); - - Self::deposit_event(RawEvent::Created(asset_id, account_id, options)); - - Ok(()) - } - - /// Transfer some liquid free balance from one account to another. - /// This will not emit the `Transferred` event. 
- pub fn make_transfer( - asset_id: &T::AssetId, - from: &T::AccountId, - to: &T::AccountId, - amount: T::Balance - ) -> DispatchResult { - let new_balance = Self::free_balance(asset_id, from) - .checked_sub(&amount) - .ok_or(Error::::InsufficientBalance)?; - Self::ensure_can_withdraw(asset_id, from, amount, WithdrawReason::Transfer.into(), new_balance)?; - - if from != to { - >::mutate(asset_id, from, |balance| *balance -= amount); - >::mutate(asset_id, to, |balance| *balance += amount); - } - - Ok(()) - } - - /// Transfer some liquid free balance from one account to another. - /// This will emit the `Transferred` event. - pub fn make_transfer_with_event( - asset_id: &T::AssetId, - from: &T::AccountId, - to: &T::AccountId, - amount: T::Balance, - ) -> DispatchResult { - Self::make_transfer(asset_id, from, to, amount)?; - - if from != to { - Self::deposit_event(RawEvent::Transferred(*asset_id, from.clone(), to.clone(), amount)); - } - - Ok(()) - } - - /// Move `amount` from free balance to reserved balance. - /// - /// If the free balance is lower than `amount`, then no funds will be moved and an `Err` will - /// be returned. This is different behavior than `unreserve`. - pub fn reserve(asset_id: &T::AssetId, who: &T::AccountId, amount: T::Balance) - -> DispatchResult - { - // Do we need to consider that this is an atomic transaction? - let original_reserve_balance = Self::reserved_balance(asset_id, who); - let original_free_balance = Self::free_balance(asset_id, who); - if original_free_balance < amount { - Err(Error::::InsufficientBalance)? - } - let new_reserve_balance = original_reserve_balance + amount; - Self::set_reserved_balance(asset_id, who, new_reserve_balance); - let new_free_balance = original_free_balance - amount; - Self::set_free_balance(asset_id, who, new_free_balance); - Ok(()) - } - - /// Moves up to `amount` from reserved balance to free balance. This function cannot fail. - /// - /// As many assets up to `amount` will be moved as possible. If the reserve balance of `who` - /// is less than `amount`, then the remaining amount will be returned. - /// NOTE: This is different behavior than `reserve`. - pub fn unreserve(asset_id: &T::AssetId, who: &T::AccountId, amount: T::Balance) -> T::Balance { - let b = Self::reserved_balance(asset_id, who); - let actual = sp_std::cmp::min(b, amount); - let original_free_balance = Self::free_balance(asset_id, who); - let new_free_balance = original_free_balance + actual; - Self::set_free_balance(asset_id, who, new_free_balance); - Self::set_reserved_balance(asset_id, who, b - actual); - amount - actual - } - - /// Deduct up to `amount` from the combined balance of `who`, preferring to deduct from the - /// free balance. This function cannot fail. - /// - /// As much funds up to `amount` will be deducted as possible. If this is less than `amount` - /// then `Some(remaining)` will be returned. Full completion is given by `None`. - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. - pub fn slash(asset_id: &T::AssetId, who: &T::AccountId, amount: T::Balance) -> Option { - let free_balance = Self::free_balance(asset_id, who); - let free_slash = sp_std::cmp::min(free_balance, amount); - let new_free_balance = free_balance - free_slash; - Self::set_free_balance(asset_id, who, new_free_balance); - if free_slash < amount { - Self::slash_reserved(asset_id, who, amount - free_slash) - } else { - None - } - } - - /// Deducts up to `amount` from reserved balance of `who`. 
This function cannot fail. - /// - /// As much funds up to `amount` will be deducted as possible. If the reserve balance of `who` - /// is less than `amount`, then a non-zero second item will be returned. - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. - pub fn slash_reserved(asset_id: &T::AssetId, who: &T::AccountId, amount: T::Balance) -> Option { - let original_reserve_balance = Self::reserved_balance(asset_id, who); - let slash = sp_std::cmp::min(original_reserve_balance, amount); - let new_reserve_balance = original_reserve_balance - slash; - Self::set_reserved_balance(asset_id, who, new_reserve_balance); - if amount == slash { - None - } else { - Some(amount - slash) - } - } - - /// Move up to `amount` from reserved balance of account `who` to balance of account - /// `beneficiary`, either free or reserved depending on `status`. - /// - /// As much funds up to `amount` will be moved as possible. If this is less than `amount`, then - /// the `remaining` would be returned, else `Zero::zero()`. - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. - pub fn repatriate_reserved( - asset_id: &T::AssetId, - who: &T::AccountId, - beneficiary: &T::AccountId, - amount: T::Balance, - status: BalanceStatus, - ) -> T::Balance { - let b = Self::reserved_balance(asset_id, who); - let slash = sp_std::cmp::min(b, amount); - - match status { - BalanceStatus::Free => { - let original_free_balance = Self::free_balance(asset_id, beneficiary); - let new_free_balance = original_free_balance + slash; - Self::set_free_balance(asset_id, beneficiary, new_free_balance); - } - BalanceStatus::Reserved => { - let original_reserved_balance = Self::reserved_balance(asset_id, beneficiary); - let new_reserved_balance = original_reserved_balance + slash; - Self::set_reserved_balance(asset_id, beneficiary, new_reserved_balance); - } - } - - let new_reserve_balance = b - slash; - Self::set_reserved_balance(asset_id, who, new_reserve_balance); - amount - slash - } - - /// Check permission to perform burn, mint or update. - /// - /// # Arguments - /// * `asset_id`: A `T::AssetId` type that contains the `asset_id`, which has the permission embedded. - /// * `who`: A `T::AccountId` type that contains the `account_id` for which to check permissions. - /// * `what`: The permission to check. - /// - pub fn check_permission(asset_id: &T::AssetId, who: &T::AccountId, what: &PermissionType) -> bool { - let permission_versions: PermissionVersions = Self::get_permission(asset_id); - let permission = permission_versions.into(); - - match (what, permission) { - ( - PermissionType::Burn, - PermissionLatest { - burn: Owner::Address(account), - .. - }, - ) => account == *who, - ( - PermissionType::Mint, - PermissionLatest { - mint: Owner::Address(account), - .. - }, - ) => account == *who, - ( - PermissionType::Update, - PermissionLatest { - update: Owner::Address(account), - .. - }, - ) => account == *who, - _ => false, - } - } - - /// Return `Ok` iff the account is able to make a withdrawal of the given amount - /// for the given reason. - /// - /// `Err(...)` with the reason why not otherwise. 
- pub fn ensure_can_withdraw( - asset_id: &T::AssetId, - who: &T::AccountId, - _amount: T::Balance, - reasons: WithdrawReasons, - new_balance: T::Balance, - ) -> DispatchResult { - if asset_id != &Self::staking_asset_id() { - return Ok(()); - } - - let locks = Self::locks(who); - if locks.is_empty() { - return Ok(()); - } - if Self::locks(who) - .into_iter().all(|l| new_balance >= l.amount || !l.reasons.intersects(reasons)) - { - Ok(()) - } else { - Err(Error::::LiquidityRestrictions)? - } - } - - // PRIVATE MUTABLES - - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. - fn set_reserved_balance(asset_id: &T::AssetId, who: &T::AccountId, balance: T::Balance) { - >::insert(asset_id, who, &balance); - } - - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. - fn set_free_balance(asset_id: &T::AssetId, who: &T::AccountId, balance: T::Balance) { - >::insert(asset_id, who, &balance); - } - - fn set_lock( - id: LockIdentifier, - who: &T::AccountId, - amount: T::Balance, - reasons: WithdrawReasons, - ) { - let mut new_lock = Some(BalanceLock { - id, - amount, - reasons, - }); - let mut locks = >::locks(who) - .into_iter() - .filter_map(|l| { - if l.id == id { - new_lock.take() - } else { - Some(l) - } - }) - .collect::>(); - if let Some(lock) = new_lock { - locks.push(lock) - } - >::insert(who, locks); - } - - fn extend_lock( - id: LockIdentifier, - who: &T::AccountId, - amount: T::Balance, - reasons: WithdrawReasons, - ) { - let mut new_lock = Some(BalanceLock { - id, - amount, - reasons, - }); - let mut locks = >::locks(who) - .into_iter() - .filter_map(|l| { - if l.id == id { - new_lock.take().map(|nl| BalanceLock { - id: l.id, - amount: l.amount.max(nl.amount), - reasons: l.reasons | nl.reasons, - }) - } else { - Some(l) - } - }) - .collect::>(); - if let Some(lock) = new_lock { - locks.push(lock) - } - >::insert(who, locks); - } - - fn remove_lock(id: LockIdentifier, who: &T::AccountId) { - let mut locks = >::locks(who); - locks.retain(|l| l.id != id); - >::insert(who, locks); - } -} - -pub trait AssetIdProvider { - type AssetId; - fn asset_id() -> Self::AssetId; -} - -// wrapping these imbalances in a private module is necessary to ensure absolute privacy -// of the inner member. -mod imbalances { - use super::{ - result, AssetIdProvider, Imbalance, Saturating, StorageMap, Subtrait, Zero, TryDrop - }; - use sp_std::mem; - - /// Opaque, move-only struct with private fields that serves as a token denoting that - /// funds have been created without any equal and opposite accounting. - #[must_use] - pub struct PositiveImbalance>( - T::Balance, - sp_std::marker::PhantomData, - ); - impl PositiveImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - pub fn new(amount: T::Balance) -> Self { - PositiveImbalance(amount, Default::default()) - } - } - - /// Opaque, move-only struct with private fields that serves as a token denoting that - /// funds have been destroyed without any equal and opposite accounting. 
- #[must_use] - pub struct NegativeImbalance>( - T::Balance, - sp_std::marker::PhantomData, - ); - impl NegativeImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - pub fn new(amount: T::Balance) -> Self { - NegativeImbalance(amount, Default::default()) - } - } - - impl TryDrop for PositiveImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - fn try_drop(self) -> result::Result<(), Self> { - self.drop_zero() - } - } - - impl Imbalance for PositiveImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - type Opposite = NegativeImbalance; - - fn zero() -> Self { - Self::new(Zero::zero()) - } - fn drop_zero(self) -> result::Result<(), Self> { - if self.0.is_zero() { - Ok(()) - } else { - Err(self) - } - } - fn split(self, amount: T::Balance) -> (Self, Self) { - let first = self.0.min(amount); - let second = self.0 - first; - - mem::forget(self); - (Self::new(first), Self::new(second)) - } - fn merge(mut self, other: Self) -> Self { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - - self - } - fn subsume(&mut self, other: Self) { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - } - fn offset(self, other: Self::Opposite) -> result::Result { - let (a, b) = (self.0, other.0); - mem::forget((self, other)); - - if a >= b { - Ok(Self::new(a - b)) - } else { - Err(NegativeImbalance::new(b - a)) - } - } - fn peek(&self) -> T::Balance { - self.0.clone() - } - } - - impl TryDrop for NegativeImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - fn try_drop(self) -> result::Result<(), Self> { - self.drop_zero() - } - } - - impl Imbalance for NegativeImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - type Opposite = PositiveImbalance; - - fn zero() -> Self { - Self::new(Zero::zero()) - } - fn drop_zero(self) -> result::Result<(), Self> { - if self.0.is_zero() { - Ok(()) - } else { - Err(self) - } - } - fn split(self, amount: T::Balance) -> (Self, Self) { - let first = self.0.min(amount); - let second = self.0 - first; - - mem::forget(self); - (Self::new(first), Self::new(second)) - } - fn merge(mut self, other: Self) -> Self { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - - self - } - fn subsume(&mut self, other: Self) { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - } - fn offset(self, other: Self::Opposite) -> result::Result { - let (a, b) = (self.0, other.0); - mem::forget((self, other)); - - if a >= b { - Ok(Self::new(a - b)) - } else { - Err(PositiveImbalance::new(b - a)) - } - } - fn peek(&self) -> T::Balance { - self.0.clone() - } - } - - impl Drop for PositiveImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - /// Basic drop handler will just square up the total issuance. - fn drop(&mut self) { - >>::mutate(&U::asset_id(), |v| *v = v.saturating_add(self.0)); - } - } - - impl Drop for NegativeImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - /// Basic drop handler will just square up the total issuance. - fn drop(&mut self) { - >>::mutate(&U::asset_id(), |v| *v = v.saturating_sub(self.0)); - } - } -} - -// TODO: #2052 -// Somewhat ugly hack in order to gain access to module's `increase_total_issuance_by` -// using only the Subtrait (which defines only the types that are not dependent -// on Positive/NegativeImbalance). Subtrait must be used otherwise we end up with a -// circular dependency with Trait having some types be dependent on PositiveImbalance -// and PositiveImbalance itself depending back on Trait for its Drop impl (and thus -// its type declaration). 
-// This works as long as `increase_total_issuance_by` doesn't use the Imbalance -// types (basically for charging fees). -// This should eventually be refactored so that the two type items that do -// depend on the Imbalance type (TransactionPayment, DustRemoval) -// are placed in their own pallet. -struct ElevatedTrait(T); -impl Clone for ElevatedTrait { - fn clone(&self) -> Self { - unimplemented!() - } -} -impl PartialEq for ElevatedTrait { - fn eq(&self, _: &Self) -> bool { - unimplemented!() - } -} -impl Eq for ElevatedTrait {} -impl frame_system::Trait for ElevatedTrait { - type BaseCallFilter = T::BaseCallFilter; - type Origin = T::Origin; - type Call = T::Call; - type Index = T::Index; - type BlockNumber = T::BlockNumber; - type Hash = T::Hash; - type Hashing = T::Hashing; - type AccountId = T::AccountId; - type Lookup = T::Lookup; - type Header = T::Header; - type Event = (); - type BlockHashCount = T::BlockHashCount; - type MaximumBlockWeight = T::MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = T::MaximumBlockWeight; - type MaximumBlockLength = T::MaximumBlockLength; - type AvailableBlockRatio = T::AvailableBlockRatio; - type Version = T::Version; - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); -} -impl Trait for ElevatedTrait { - type Balance = T::Balance; - type AssetId = T::AssetId; - type Event = (); -} - -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct AssetCurrency(sp_std::marker::PhantomData, sp_std::marker::PhantomData); - -impl Currency for AssetCurrency -where - T: Trait, - U: AssetIdProvider, -{ - type Balance = T::Balance; - type PositiveImbalance = PositiveImbalance; - type NegativeImbalance = NegativeImbalance; - - fn total_balance(who: &T::AccountId) -> Self::Balance { - Self::free_balance(&who) + Self::reserved_balance(&who) - } - - fn free_balance(who: &T::AccountId) -> Self::Balance { - >::free_balance(&U::asset_id(), &who) - } - - /// Returns the total staking asset issuance - fn total_issuance() -> Self::Balance { - >::total_issuance(U::asset_id()) - } - - fn minimum_balance() -> Self::Balance { - Zero::zero() - } - - fn transfer( - transactor: &T::AccountId, - dest: &T::AccountId, - value: Self::Balance, - _: ExistenceRequirement, // no existential deposit policy for generic asset - ) -> DispatchResult { - >::make_transfer(&U::asset_id(), transactor, dest, value) - } - - fn ensure_can_withdraw( - who: &T::AccountId, - amount: Self::Balance, - reasons: WithdrawReasons, - new_balance: Self::Balance, - ) -> DispatchResult { - >::ensure_can_withdraw(&U::asset_id(), who, amount, reasons, new_balance) - } - - fn withdraw( - who: &T::AccountId, - value: Self::Balance, - reasons: WithdrawReasons, - _: ExistenceRequirement, // no existential deposit policy for generic asset - ) -> result::Result { - let new_balance = Self::free_balance(who) - .checked_sub(&value) - .ok_or(Error::::InsufficientBalance)?; - Self::ensure_can_withdraw(who, value, reasons, new_balance)?; - >::set_free_balance(&U::asset_id(), who, new_balance); - Ok(NegativeImbalance::new(value)) - } - - fn deposit_into_existing( - who: &T::AccountId, - value: Self::Balance, - ) -> result::Result { - // No existential deposit rule and creation fee in GA. `deposit_into_existing` is same with `deposit_creating`. 
- Ok(Self::deposit_creating(who, value)) - } - - fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance { - let imbalance = Self::make_free_balance_be(who, Self::free_balance(who) + value); - if let SignedImbalance::Positive(p) = imbalance { - p - } else { - // Impossible, but be defensive. - Self::PositiveImbalance::zero() - } - } - - fn make_free_balance_be( - who: &T::AccountId, - balance: Self::Balance, - ) -> SignedImbalance { - let original = >::free_balance(&U::asset_id(), who); - let imbalance = if original <= balance { - SignedImbalance::Positive(PositiveImbalance::new(balance - original)) - } else { - SignedImbalance::Negative(NegativeImbalance::new(original - balance)) - }; - >::set_free_balance(&U::asset_id(), who, balance); - imbalance - } - - fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool { - >::free_balance(&U::asset_id(), &who) >= value - } - - fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) { - let remaining = >::slash(&U::asset_id(), who, value); - if let Some(r) = remaining { - (NegativeImbalance::new(value - r), r) - } else { - (NegativeImbalance::new(value), Zero::zero()) - } - } - - fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance { - >::mutate(&U::asset_id(), |issued| - issued.checked_sub(&amount).unwrap_or_else(|| { - amount = *issued; - Zero::zero() - }) - ); - PositiveImbalance::new(amount) - } - - fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance { - >::mutate(&U::asset_id(), |issued| - *issued = issued.checked_add(&amount).unwrap_or_else(|| { - amount = Self::Balance::max_value() - *issued; - Self::Balance::max_value() - }) - ); - NegativeImbalance::new(amount) - } -} - -impl ReservableCurrency for AssetCurrency -where - T: Trait, - U: AssetIdProvider, -{ - fn can_reserve(who: &T::AccountId, value: Self::Balance) -> bool { - Self::free_balance(who) - .checked_sub(&value) - .map_or(false, |new_balance| - >::ensure_can_withdraw( - &U::asset_id(), who, value, WithdrawReason::Reserve.into(), new_balance - ).is_ok() - ) - } - - fn reserved_balance(who: &T::AccountId) -> Self::Balance { - >::reserved_balance(&U::asset_id(), &who) - } - - fn reserve(who: &T::AccountId, value: Self::Balance) -> DispatchResult { - >::reserve(&U::asset_id(), who, value) - } - - fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { - >::unreserve(&U::asset_id(), who, value) - } - - fn slash_reserved(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) { - let b = Self::reserved_balance(&who.clone()); - let slash = cmp::min(b, value); - - >::set_reserved_balance(&U::asset_id(), who, b - slash); - (NegativeImbalance::new(slash), value - slash) - } - - fn repatriate_reserved( - slashed: &T::AccountId, - beneficiary: &T::AccountId, - value: Self::Balance, - status: BalanceStatus, - ) -> result::Result { - Ok(>::repatriate_reserved(&U::asset_id(), slashed, beneficiary, value, status)) - } -} - -pub struct StakingAssetIdProvider(sp_std::marker::PhantomData); - -impl AssetIdProvider for StakingAssetIdProvider { - type AssetId = T::AssetId; - fn asset_id() -> Self::AssetId { - >::staking_asset_id() - } -} - -pub struct SpendingAssetIdProvider(sp_std::marker::PhantomData); - -impl AssetIdProvider for SpendingAssetIdProvider { - type AssetId = T::AssetId; - fn asset_id() -> Self::AssetId { - >::spending_asset_id() - } -} - -impl LockableCurrency for AssetCurrency> -where - T: Trait, - T::Balance: MaybeSerializeDeserialize + 
Debug, -{ - type Moment = T::BlockNumber; - - fn set_lock( - id: LockIdentifier, - who: &T::AccountId, - amount: T::Balance, - reasons: WithdrawReasons, - ) { - >::set_lock(id, who, amount, reasons) - } - - fn extend_lock( - id: LockIdentifier, - who: &T::AccountId, - amount: T::Balance, - reasons: WithdrawReasons, - ) { - >::extend_lock(id, who, amount, reasons) - } - - fn remove_lock(id: LockIdentifier, who: &T::AccountId) { - >::remove_lock(id, who) - } -} - -pub type StakingAssetCurrency = AssetCurrency>; -pub type SpendingAssetCurrency = AssetCurrency>; diff --git a/frame/generic-asset/src/mock.rs b/frame/generic-asset/src/mock.rs deleted file mode 100644 index 8c0a06a1564df1840a7a47f8ca5d9e59d874f0a5..0000000000000000000000000000000000000000 --- a/frame/generic-asset/src/mock.rs +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2019-2020 -// by Centrality Investments Ltd. -// and Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Mocks for the module. - -#![cfg(test)] - -use sp_runtime::{ - Perbill, - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, -}; -use sp_core::H256; -use frame_support::{parameter_types, impl_outer_event, impl_outer_origin, weights::Weight}; - -use super::*; - -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} - -#[derive(Clone, Eq, PartialEq)] -pub struct Test; -parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} -impl frame_system::Trait for Test { - type BaseCallFilter = (); - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = TestEvent; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type BlockHashCount = BlockHashCount; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); -} - -impl Trait for Test { - type Balance = u64; - type AssetId = u32; - type Event = TestEvent; -} - -mod generic_asset { - pub use crate::Event; -} - -use frame_system as system; -impl_outer_event! 
{ - pub enum TestEvent for Test { - system, - generic_asset, - } -} - -pub type GenericAsset = Module; - -pub type System = frame_system::Module; - -pub struct ExtBuilder { - asset_id: u32, - next_asset_id: u32, - accounts: Vec, - initial_balance: u64, -} - -// Returns default values for genesis config -impl Default for ExtBuilder { - fn default() -> Self { - Self { - asset_id: 0, - next_asset_id: 1000, - accounts: vec![0], - initial_balance: 0, - } - } -} - -impl ExtBuilder { - // Sets free balance to genesis config - pub fn free_balance(mut self, free_balance: (u32, u64, u64)) -> Self { - self.asset_id = free_balance.0; - self.accounts = vec![free_balance.1]; - self.initial_balance = free_balance.2; - self - } - - pub fn next_asset_id(mut self, asset_id: u32) -> Self { - self.next_asset_id = asset_id; - self - } - - // builds genesis config - pub fn build(self) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - - GenesisConfig:: { - assets: vec![self.asset_id], - endowed_accounts: self.accounts, - initial_balance: self.initial_balance, - next_asset_id: self.next_asset_id, - staking_asset_id: 16000, - spending_asset_id: 16001, - }.assimilate_storage(&mut t).unwrap(); - - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext - } -} - -pub fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default() - .build_storage::() - .unwrap() - .into() -} diff --git a/frame/generic-asset/src/tests.rs b/frame/generic-asset/src/tests.rs deleted file mode 100644 index a094f69ba1fc581f0138574e06c202c404050cd6..0000000000000000000000000000000000000000 --- a/frame/generic-asset/src/tests.rs +++ /dev/null @@ -1,1215 +0,0 @@ -// Copyright 2019-2020 -// by Centrality Investments Ltd. -// and Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Tests for the module. 
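The tests below lean entirely on the genesis state that `ExtBuilder::build` (removed just above) assembles; as a reading aid, here is a condensed sketch of that relationship, reusing the mock's own values and getters. It is illustrative only, not code added by this diff:

```rust
// Illustrative sketch: what ExtBuilder::free_balance((asset, account, amount))
// seeds before each deleted test runs. The designated staking (16000) and
// spending (16001) asset ids are hard-coded in `ExtBuilder::build` in mock.rs.
use crate::mock::{ExtBuilder, GenericAsset};

#[test]
fn genesis_seeds_balances_and_designated_assets() {
	ExtBuilder::default().free_balance((16000, 1, 100)).build().execute_with(|| {
		assert_eq!(GenericAsset::free_balance(&16000, &1), 100);
		assert_eq!(GenericAsset::total_balance(&16000, &1), 100); // nothing reserved yet
		assert_eq!(GenericAsset::staking_asset_id(), 16000);
		assert_eq!(GenericAsset::spending_asset_id(), 16001);
	});
}
```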
- -#![cfg(test)] - -use super::*; -use crate::mock::{new_test_ext, ExtBuilder, GenericAsset, Origin, System, Test, TestEvent}; -use frame_support::{assert_noop, assert_ok}; - -#[test] -fn issuing_asset_units_to_issuer_should_work() { - let balance = 100; - - ExtBuilder::default().free_balance((16000, 1, 100)).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - - let expected_balance = balance; - - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: balance, - permissions: default_permission - } - )); - assert_eq!(GenericAsset::free_balance(&16000, &1), expected_balance); - }); -} - -#[test] -fn issuing_with_next_asset_id_overflow_should_not_work() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - NextAssetId::::put(u32::max_value()); - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - assert_noop!( - GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: 1, - permissions: default_permission - } - ), - Error::::NoIdAvailable - ); - assert_eq!(GenericAsset::next_asset_id(), u32::max_value()); - }); -} - -#[test] -fn querying_total_supply_should_work() { - let asset_id = 1000; - - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: 100, - permissions: default_permission - } - )); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), 100); - assert_ok!(GenericAsset::transfer(Origin::signed(1), asset_id, 2, 50)); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), 50); - assert_eq!(GenericAsset::free_balance(&asset_id, &2), 50); - assert_ok!(GenericAsset::transfer(Origin::signed(2), asset_id, 3, 31)); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), 50); - assert_eq!(GenericAsset::free_balance(&asset_id, &2), 19); - assert_eq!(GenericAsset::free_balance(&asset_id, &3), 31); - assert_ok!(GenericAsset::transfer(Origin::signed(1), asset_id, 1, 1)); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), 50); - }); -} - -// Given -// - The next asset id as `asset_id` = 1000. -// - AssetOptions with all permissions. -// - GenesisStore has sufficient free balance. -// -// When -// - Create an asset from `origin` as 1. -// Then -// - free_balance of next asset id = 100. -// -// When -// - After transferring 40 from account 1 to account 2. -// Then -// - Origin account's `free_balance` = 60. -// - account 2's `free_balance` = 40. 
-#[test] -fn transferring_amount_should_work() { - let asset_id = 1000; - let free_balance = 100; - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: free_balance, - permissions: default_permission - } - )); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), free_balance); - assert_ok!(GenericAsset::transfer(Origin::signed(1), asset_id, 2, 40)); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), 60); - assert_eq!(GenericAsset::free_balance(&asset_id, &2), 40); - }); -} - -// Given -// - The next asset id as `asset_id` = 1000. -// - AssetOptions with all permissions. -// - GenesisStore has sufficient free balance. -// -// When -// - Create an asset from `origin` as 1. -// Then -// - free_balance of next asset id = 100. -// -// When -// - After transferring 40 from account 1 to account 2. -// Then -// - Origin account's `free_balance` = 60. -// - account 2's `free_balance` = 40. -#[test] -fn transferring_amount_should_fail_when_transferring_more_than_free_balance() { - let asset_id = 1000; - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: 100, - permissions: default_permission - } - )); - assert_noop!( - GenericAsset::transfer(Origin::signed(1), asset_id, 2, 2000), - Error::::InsufficientBalance - ); - }); -} - -#[test] -fn transferring_less_than_one_unit_should_not_work() { - let asset_id = 1000; - - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: 100, - permissions: default_permission - } - )); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), 100); - assert_noop!( - GenericAsset::transfer(Origin::signed(1), asset_id, 2, 0), - Error::::ZeroAmount - ); - }); -} - -// Given -// - Next asset id as `asset_id` = 1000. -// - Sufficient free balance. -// - initial balance = 100. -// When -// - After performing a self transfer from account 1 to 1. -// Then -// - Should not throw any errors. -// - Free balance after self transfer should equal to the free balance before self transfer. 
-#[test] -fn self_transfer_should_fail() { - let asset_id = 1000; - let balance = 100; - - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: balance, - permissions: default_permission - } - )); - - let initial_free_balance = GenericAsset::free_balance(&asset_id, &1); - assert_ok!(GenericAsset::transfer(Origin::signed(1), asset_id, 1, 10)); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), initial_free_balance); - }); -} - -#[test] -fn transferring_more_units_than_total_supply_should_not_work() { - let asset_id = 1000; - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: 100, - permissions: default_permission - } - )); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), 100); - assert_noop!( - GenericAsset::transfer(Origin::signed(1), asset_id, 2, 101), - Error::<Test>::InsufficientBalance - ); - }); -} - -// Ensures it uses fake money for staking asset id. -#[test] -fn staking_asset_id_should_return_0() { - ExtBuilder::default().build().execute_with(|| { - assert_eq!(GenericAsset::staking_asset_id(), 16000); - }); -} - -// Ensures it uses fake money for spending asset id. -#[test] -fn spending_asset_id_should_return_10() { - ExtBuilder::default().build().execute_with(|| { - assert_eq!(GenericAsset::spending_asset_id(), 16001); - }); -} - -// Given -// - Free balance is 0 and the reserved balance is 0. -// Then -// - total_balance should return 0 -#[test] -fn total_balance_should_be_zero() { - new_test_ext().execute_with(|| { - assert_eq!(GenericAsset::total_balance(&0, &0), 0); - }); -} - -// Given -// - Free balance is 0 and the reserved balance > 0. -// When -// - After calling total_balance. -// Then -// - total_balance should equal the reserved balance. -#[test] -fn total_balance_should_be_equal_to_account_balance() { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: 100, - permissions: default_permission - } - )); - assert_eq!(GenericAsset::total_balance(&1000, &1), 100); - }); -} - -// Given -// - An account is present with AccountId = 1 -// - free_balance > 0. -// - reserved_balance = 50. -// When -// - After calling free_balance. -// Then -// - free_balance should return 50. -#[test] -fn free_balance_should_only_return_account_free_balance() { - ExtBuilder::default().free_balance((1, 0, 50)).build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 70); - assert_eq!(GenericAsset::free_balance(&1, &0), 50); - }); -} - -// Given -// - An account is present with AccountId = 1. -// - Free balance > 0 and the reserved balance > 0. -// When -// - After calling total_balance. -// Then -// - total_balance should equal account balance + free balance.
-#[test] -fn total_balance_should_be_equal_to_sum_of_account_balance_and_free_balance() { - ExtBuilder::default().free_balance((1, 0, 50)).build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 70); - assert_eq!(GenericAsset::total_balance(&1, &0), 120); - }); -} - -// Given -// - free_balance > 0. -// - reserved_balance = 70. -// When -// - After calling reserved_balance. -// Then -// - reserved_balance should return 70. -#[test] -fn reserved_balance_should_only_return_account_reserved_balance() { - ExtBuilder::default().free_balance((1, 0, 50)).build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 70); - assert_eq!(GenericAsset::reserved_balance(&1, &0), 70); - }); -} - -// Given -// - A valid account is present. -// - Initial reserved_balance = 0 -// When -// - After calling set_reserved_balance -// Then -// - Should persist the amount as reserved_balance. -// - reserved_balance = amount -#[test] -fn set_reserved_balance_should_add_balance_as_reserved() { - ExtBuilder::default().build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 50); - assert_eq!(GenericAsset::reserved_balance(&1, &0), 50); - }); -} - -// Given -// - A valid account is present. -// - Initial free_balance = 100. -// When -// - After calling set_free_balance. -// Then -// - Should persist the amount as free_balance. -// - New free_balance should replace the older free_balance. -#[test] -fn set_free_balance_should_add_amount_as_free_balance() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - GenericAsset::set_free_balance(&1, &0, 50); - assert_eq!(GenericAsset::free_balance(&1, &0), 50); - }); -} - -// Given -// - free_balance is greater than the reserve amount. -// - free_balance = 100 -// - reserved_balance = 0 -// - reserve amount = 70 -// When -// - After calling reserve -// Then -// - Funds should be removed from the account. -// - new free_balance = original free_balance - reserved amount -// - new reserved_balance = original reserved_balance + reserved amount -#[test] -fn reserve_should_moves_amount_from_balance_to_reserved_balance() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - assert_ok!(GenericAsset::reserve(&1, &0, 70)); - assert_eq!(GenericAsset::free_balance(&1, &0), 30); - assert_eq!(GenericAsset::reserved_balance(&1, &0), 70); - }); -} - -// Given -// - Free balance is lower than the reserve amount. -// - free_balance = 100 -// - reserved_balance = 0 -// - reserve amount = 120 -// When -// - After calling the reserve function. -// Then -// - Funds should not be removed from the account. -// - Should throw an error. -#[test] -fn reserve_should_not_moves_amount_from_balance_to_reserved_balance() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - assert_noop!(GenericAsset::reserve(&1, &0, 120), Error::<Test>::InsufficientBalance); - assert_eq!(GenericAsset::free_balance(&1, &0), 100); - assert_eq!(GenericAsset::reserved_balance(&1, &0), 0); - }); -} - -// Given -// - unreserved_amount > reserved_balance. -// - reserved_balance = 100. -// - free_balance = 100. -// - unreserved_amount = 120. -// When -// - After calling unreserve function. -// Then -// - unreserved should return 20.
-#[test] -fn unreserve_should_return_subtracted_value_from_unreserved_amount_by_actual_account_balance() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - assert_eq!(GenericAsset::unreserve(&1, &0, 120), 20); - }); -} - -// Given -// - unreserved_amount < reserved_balance. -// - reserved_balance = 100. -// - free_balance = 100. -// - unreserved_amount = 50. -// When -// - After calling unreserve function. -// Then -// - unreserved should return 0. -#[test] -fn unreserve_should_return_none() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - assert_eq!(GenericAsset::unreserve(&1, &0, 50), 0); - }); -} - -// Given -// - unreserved_amount > reserved_balance. -// - reserved_balance = 100. -// - free_balance = 100. -// - unreserved_amount = 120. -// When -// - After calling unreserve function. -// Then -// - free_balance should be 200. -#[test] -fn unreserve_should_increase_free_balance_by_reserved_balance() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - GenericAsset::unreserve(&1, &0, 120); - assert_eq!(GenericAsset::free_balance(&1, &0), 200); - }); -} - -// Given -// - unreserved_amount > reserved_balance. -// - reserved_balance = 100. -// - free_balance = 100. -// - unreserved_amount = 120. -// When -// - After calling unreserve function. -// Then -// - reserved_balance should be 0. -#[test] -fn unreserve_should_deduct_reserved_balance_by_reserved_amount() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - GenericAsset::set_free_balance(&1, &0, 100); - GenericAsset::unreserve(&1, &0, 120); - assert_eq!(GenericAsset::reserved_balance(&1, &0), 0); - }); -} - -// Given -// - slash amount < free_balance. -// - reserved_balance = 100. -// - free_balance = 100. -// - slash amount = 70. -// When -// - After calling slash function. -// Then -// - slash should return None. -#[test] -fn slash_should_return_slash_reserved_amount() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - assert_eq!(GenericAsset::slash(&1, &0, 70), None); - }); -} - -// Given -// - slashed_amount > reserved_balance. -// When -// - After calling the slash_reserved function. -// Then -// - Should return slashed_amount - reserved_balance. -#[test] -fn slash_reserved_should_deducts_up_to_amount_from_reserved_balance() { - ExtBuilder::default().build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - assert_eq!(GenericAsset::slash_reserved(&1, &0, 150), Some(50)); - }); -} - -// Given -// - slashed_amount equals reserved_amount. -// When -// - After calling the slash_reserved function. -// Then -// - Should return None. -#[test] -fn slash_reserved_should_return_none() { - ExtBuilder::default().build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - assert_eq!(GenericAsset::slash_reserved(&1, &0, 100), None); - }); -} - -// Given -// - reserved_balance = 100. -// - repatriate_reserved_amount > reserved_balance. -// When -// - After calling repatriate_reserved. -// Then -// - Should not return None.
-#[test] -fn repatriate_reserved_return_amount_subtracted_by_slash_amount() { - ExtBuilder::default().build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - assert_eq!(GenericAsset::repatriate_reserved(&1, &0, &1, 130, BalanceStatus::Free), 30); - }); -} - -// Given -// - reserved_balance = 100. -// - repatriate_reserved_amount > reserved_balance. -// When -// - After calling repatriate_reserved. -// Then -// - Should return None. -#[test] -fn repatriate_reserved_return_none() { - ExtBuilder::default().build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - assert_eq!(GenericAsset::repatriate_reserved(&1, &0, &1, 90, BalanceStatus::Free), 0); - }); -} - -// Given -// - An asset with all permissions -// When -// - After calling `create_reserved` function. -// Then -// - Should create a new reserved asset. -#[test] -fn create_reserved_should_create_a_default_account_with_the_balance_given() { - ExtBuilder::default().next_asset_id(10).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - let options = AssetOptions { - initial_issuance: 500, - permissions: default_permission, - }; - - let expected_total_issuance = 500; - let created_asset_id = 9; - let created_account_id = 0; - - assert_ok!(GenericAsset::create_reserved(Origin::root(), created_asset_id, options)); - - // Tests for side effects. - assert_eq!(>::get(created_asset_id), expected_total_issuance); - assert_eq!( - >::get(&created_asset_id, &created_account_id), - expected_total_issuance - ); - }); -} - -// Given -// - Origin is signed -// - Origin does not have minting permission -// When -// - After calling mint function -// Then -// - Should throw a permission error -#[test] -fn mint_should_throw_permission_error() { - ExtBuilder::default().build().execute_with(|| { - let origin = 1; - let asset_id = 4; - let to_account = 2; - let amount = 100; - - assert_noop!( - GenericAsset::mint(Origin::signed(origin), asset_id, to_account, amount), - Error::::NoMintPermission, - ); - }); -} - -// Given -// - Origin is signed. -// - Origin has permissions. -// When -// - After calling mint function -// Then -// - Should increase `to` free_balance. -// - Should not change `origins` free_balance. -#[test] -fn mint_should_increase_asset() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let origin = 1; - let asset_id = 1000; - let to_account = 2; - let amount = 500; - let initial_issuance = 100; - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - } - )); - - assert_ok!(GenericAsset::mint(Origin::signed(origin), asset_id, to_account, amount)); - assert_eq!(GenericAsset::free_balance(&asset_id, &to_account), amount); - - // Origin's free_balance should not change. - assert_eq!(GenericAsset::free_balance(&asset_id, &origin), initial_issuance); - }); -} - -// Given -// - Origin is signed. -// - Origin does not have burning permission. -// When -// - After calling burn function. -// Then -// - Should throw a permission error. 
-#[test] -fn burn_should_throw_permission_error() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let origin = 1; - let asset_id = 4; - let to_account = 2; - let amount = 10; - - assert_noop!( - GenericAsset::burn(Origin::signed(origin), asset_id, to_account, amount), - Error::::NoBurnPermission, - ); - }); -} - -// Given -// - Origin is signed. -// - Origin has permissions. -// When -// - After calling burn function -// Then -// - Should decrease `to`'s free_balance. -// - Should not change `origin`'s free_balance. -#[test] -fn burn_should_burn_an_asset() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let origin = 1; - let asset_id = 1000; - let to_account = 2; - let amount = 1000; - let initial_issuance = 100; - let burn_amount = 400; - let expected_amount = 600; - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - } - )); - assert_ok!(GenericAsset::mint(Origin::signed(origin), asset_id, to_account, amount)); - - assert_ok!(GenericAsset::burn( - Origin::signed(origin), - asset_id, - to_account, - burn_amount - )); - assert_eq!(GenericAsset::free_balance(&asset_id, &to_account), expected_amount); - }); -} - -// Given -// - `default_permission` with all privileges. -// - All permissions for origin. -// When -// - After executing create function and check_permission function. -// Then -// - The account origin should have burn, mint and update permissions. -#[test] -fn check_permission_should_return_correct_permission() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let origin = 1; - let asset_id = 1000; - let initial_issuance = 100; - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - }, - )); - - assert!(GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Burn)); - assert!(GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Mint)); - assert!(GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Update)); - }); -} - -// Given -// - `default_permission` with no privileges. -// - No permissions for origin. -// When -// - After executing create function and check_permission function. -// Then -// - The account origin should not have burn, mint and update permissions. 
-#[test] -fn check_permission_should_return_false_for_no_permission() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let origin = 1; - let asset_id = 1000; - let initial_issuance = 100; - - let default_permission = PermissionLatest { - update: Owner::None, - mint: Owner::None, - burn: Owner::None, - }; - - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - } - )); - - assert!(!GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Burn)); - assert!(!GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Mint)); - assert!(!GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Update)); - }); -} - -// Given -// - `default_permission` only with update. -// When -// - After executing update_permission function. -// Then -// - The account origin should not have the burn permission. -// - The account origin should have update and mint permissions. -#[test] -fn update_permission_should_change_permission() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let origin = 1; - let asset_id = 1000; - let initial_issuance = 100; - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::None, - burn: Owner::None, - }; - - let new_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::None, - }; - - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - } - )); - - assert_ok!(GenericAsset::update_permission( - Origin::signed(origin), - asset_id, - new_permission, - )); - assert!(GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Mint)); - assert!(!GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Burn)); - }); -} - -// Given -// - `default_permission` without any permissions. -// When -// - After executing update_permission function. -// Then -// - Should throw an error stating "Origin does not have enough permission to update permissions." -#[test] -fn update_permission_should_throw_error_when_lack_of_permissions() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let origin = 1; - let asset_id = 1000; - let initial_issuance = 100; - - let default_permission = PermissionLatest { - update: Owner::None, - mint: Owner::None, - burn: Owner::None, - }; - - let new_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::None, - }; - - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - }, - )); - - assert_noop!( - GenericAsset::update_permission(Origin::signed(origin), asset_id, new_permission), - Error::::NoUpdatePermission, - ); - }); -} - -// Given -// - `asset_id` provided. -// - `from_account` is present. -// - All permissions for origin. -// When -// - After calling create_asset. -// Then -// - Should create a reserved token with provided id. -// - NextAssetId doesn't change. -// - TotalIssuance must equal to initial issuance. -// - FreeBalance must equal to initial issuance for the given account. -// - Permissions must have burn, mint and updatePermission for the given asset_id. 
-#[test] -fn create_asset_works_with_given_asset_id_and_from_account() { - ExtBuilder::default().next_asset_id(10).build().execute_with(|| { - let origin = 1; - let from_account: Option<::AccountId> = Some(1); - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - let expected_permission = PermissionVersions::V1(default_permission.clone()); - let asset_id = 9; - let initial_issuance = 100; - - assert_ok!(GenericAsset::create_asset( - Some(asset_id), - from_account, - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission.clone() - } - )); - - // Test for side effects. - assert_eq!(>::get(), 10); - assert_eq!(>::get(asset_id), initial_issuance); - assert_eq!(>::get(&asset_id, &origin), initial_issuance); - assert_eq!(>::get(&asset_id), expected_permission); - }); -} - -// Given -// - `asset_id` is an id for user generated assets. -// - Whatever other params. -// Then -// - `create_asset` should not work. -#[test] -fn create_asset_with_non_reserved_asset_id_should_not_work() { - ExtBuilder::default().next_asset_id(10).build().execute_with(|| { - let origin = 1; - let from_account: Option<::AccountId> = Some(1); - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - let asset_id = 11; - let initial_issuance = 100; - - assert_noop!( - GenericAsset::create_asset( - Some(asset_id), - from_account, - AssetOptions { - initial_issuance, - permissions: default_permission.clone() - } - ), - Error::::IdUnavailable, - ); - }); -} - -// Given -// - `asset_id` is for reserved assets, but already taken. -// - Whatever other params. -// Then -// - `create_asset` should not work. -#[test] -fn create_asset_with_a_taken_asset_id_should_not_work() { - ExtBuilder::default().next_asset_id(10).build().execute_with(|| { - let origin = 1; - let from_account: Option<::AccountId> = Some(1); - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - let asset_id = 9; - let initial_issuance = 100; - - assert_ok!(GenericAsset::create_asset( - Some(asset_id), - from_account, - AssetOptions { - initial_issuance, - permissions: default_permission.clone() - } - )); - assert_noop!( - GenericAsset::create_asset( - Some(asset_id), - from_account, - AssetOptions { - initial_issuance, - permissions: default_permission.clone() - } - ), - Error::::IdAlreadyTaken, - ); - }); -} - -// Given -// - `asset_id` provided. -// - `from_account` is None. -// - All permissions for origin. -// When -// - After calling create_asset. -// Then -// - Should create a reserved token. -#[test] -fn create_asset_should_create_a_reserved_asset_when_from_account_is_none() { - ExtBuilder::default().next_asset_id(10).build().execute_with(|| { - let origin = 1; - let from_account: Option<::AccountId> = None; - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - let created_account_id = 0; - let asset_id = 9; - let initial_issuance = 100; - - assert_ok!(GenericAsset::create_asset( - Some(asset_id), - from_account, - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - } - )); - - // Test for a side effect. 
- assert_eq!( - >::get(&asset_id, &created_account_id), - initial_issuance - ); - }); -} - -// Given -// - `asset_id` not provided. -// - `from_account` is None. -// - All permissions for origin. -// When -// - After calling create_asset. -// Then -// - Should create a user token. -// - `NextAssetId`'s get should return a new value. -// - Should not create a `reserved_asset`. -#[test] -fn create_asset_should_create_a_user_asset() { - ExtBuilder::default().next_asset_id(10).build().execute_with(|| { - let origin = 1; - let from_account: Option<::AccountId> = None; - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - let created_account_id = 0; - let reserved_asset_id = 100000; - let initial_issuance = 100; - let created_user_asset_id = 10; - - assert_ok!(GenericAsset::create_asset( - None, - from_account, - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - } - )); - - // Test for side effects. - assert_eq!(>::get(&reserved_asset_id, &created_account_id), 0); - assert_eq!( - >::get(&created_user_asset_id, &created_account_id), - initial_issuance - ); - assert_eq!(>::get(created_user_asset_id), initial_issuance); - }); -} - -#[test] -fn update_permission_should_raise_event() { - // Arrange - let staking_asset_id = 16000; - let asset_id = 1000; - let origin = 1; - let initial_balance = 1000; - let permissions = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - ExtBuilder::default() - .next_asset_id(asset_id) - .free_balance((staking_asset_id, origin, initial_balance)) - .build() - .execute_with(|| { - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: 0, - permissions: permissions.clone(), - } - )); - - // Act - assert_ok!(GenericAsset::update_permission( - Origin::signed(origin), - asset_id, - permissions.clone() - )); - - let expected_event = TestEvent::generic_asset( - RawEvent::PermissionUpdated(asset_id, permissions.clone()), - ); - // Assert - assert!(System::events().iter().any(|record| record.event == expected_event)); - }, - ); -} - -#[test] -fn mint_should_raise_event() { - // Arrange - let staking_asset_id = 16000; - let asset_id = 1000; - let origin = 1; - let initial_balance = 1000; - let permissions = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - let to = 2; - let amount = 100; - - ExtBuilder::default() - .next_asset_id(asset_id) - .free_balance((staking_asset_id, origin, initial_balance)) - .build() - .execute_with(|| { - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: 0, - permissions: permissions.clone(), - }, - )); - - // Act - assert_ok!(GenericAsset::mint(Origin::signed(origin), asset_id, to, amount)); - - let expected_event = TestEvent::generic_asset(RawEvent::Minted(asset_id, to, amount)); - - // Assert - assert!(System::events().iter().any(|record| record.event == expected_event)); - }, - ); -} - -#[test] -fn burn_should_raise_event() { - // Arrange - let staking_asset_id = 16000; - let asset_id = 1000; - let origin = 1; - let initial_balance = 1000; - let permissions = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - let amount = 100; - - ExtBuilder::default() - .next_asset_id(asset_id) - 
.free_balance((staking_asset_id, origin, initial_balance)) - .build() - .execute_with(|| { - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: amount, - permissions: permissions.clone(), - }, - )); - - // Act - assert_ok!(GenericAsset::burn(Origin::signed(origin), asset_id, origin, amount)); - - let expected_event = TestEvent::generic_asset(RawEvent::Burned(asset_id, origin, amount)); - - // Assert - assert!(System::events().iter().any(|record| record.event == expected_event)); - }, - ); -} diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index 004255b9e1e336b2d27ce24649916b56773b52a1..6147458b0dbd74300f70dc03e0a5b03d0388a03c 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-grandpa" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for GRANDPA finality gadget" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,30 +15,30 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-finality-grandpa = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/finality-grandpa" } -sp-session = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/session" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/staking" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-authorship = { version = "2.0.0-rc5", default-features = false, path = "../authorship" } -pallet-session = { version = "2.0.0-rc5", default-features = false, path = "../session" } -pallet-finality-tracker = { version = "2.0.0-rc5", default-features = false, path = "../finality-tracker" } +sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-finality-grandpa = { version = "2.0.0", default-features = false, path = "../../primitives/finality-grandpa" } +sp-session = { version = "2.0.0", default-features = false, path = "../../primitives/session" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", 
optional = true } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +pallet-authorship = { version = "2.0.0", default-features = false, path = "../authorship" } +pallet-session = { version = "2.0.0", default-features = false, path = "../session" } +pallet-finality-tracker = { version = "2.0.0", default-features = false, path = "../finality-tracker" } [dev-dependencies] -frame-benchmarking = { version = "2.0.0-rc5", path = "../benchmarking" } +frame-benchmarking = { version = "2.0.0", path = "../benchmarking" } grandpa = { package = "finality-grandpa", version = "0.12.3", features = ["derive-codec"] } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -pallet-offences = { version = "2.0.0-rc5", path = "../offences" } -pallet-staking = { version = "2.0.0-rc5", path = "../staking" } -pallet-staking-reward-curve = { version = "2.0.0-rc5", path = "../staking/reward-curve" } -pallet-timestamp = { version = "2.0.0-rc5", path = "../timestamp" } +sp-io = { version = "2.0.0", path = "../../primitives/io" } +sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } +pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-offences = { version = "2.0.0", path = "../offences" } +pallet-staking = { version = "2.0.0", path = "../staking" } +pallet-staking-reward-curve = { version = "2.0.0", path = "../staking/reward-curve" } +pallet-timestamp = { version = "2.0.0", path = "../timestamp" } [features] default = ["std"] diff --git a/frame/grandpa/README.md b/frame/grandpa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..84b181a8b31e1c102215f72d6a2d59724a9af596 --- /dev/null +++ b/frame/grandpa/README.md @@ -0,0 +1,12 @@ +GRANDPA Consensus module for runtime. + +This manages the GRANDPA authority set ready for the native code. +These authorities are only for GRANDPA finality, not for consensus overall. + +In the future, it will also handle misbehavior reports, and on-chain +finality notifications. + +For full integration with GRANDPA, the `GrandpaApi` should be implemented. +The necessary items are re-exported via the `fg_primitives` crate. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/grandpa/src/default_weights.rs b/frame/grandpa/src/default_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..4893fc2cf18600eebf9a38159546e740017f66f3 --- /dev/null +++ b/frame/grandpa/src/default_weights.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Default weights for the GRANDPA Pallet +//! This file was not auto-generated. 
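(Editorial aside, not part of the diff: the new README above says the pallet keeps the GRANDPA authority set ready for the native code. The sketch below illustrates what that means in practice, reusing the mock helpers that appear in frame/grandpa/src/tests.rs and mock.rs further down this diff; the non-empty genesis authority list and the genesis set id of 0 are assumptions about the mock's genesis state, not something this PR asserts.)

#[test]
fn pallet_keeps_the_authority_set_ready_for_the_native_code() {
    let authorities = test_authorities();
    new_test_ext_raw_authorities(authorities).execute_with(|| {
        // the authority list (public key + voter weight pairs) is initialised
        // from the genesis session keys via OneSessionHandler::on_genesis_session
        // (see the note in mock.rs below)
        assert!(!Grandpa::grandpa_authorities().is_empty());
        // the set id only advances when an authority-set change is enacted,
        // so at genesis it is still the initial value (assumed to be 0 here)
        assert_eq!(Grandpa::current_set_id(), 0);
    });
}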
+ +use frame_support::weights::{ + Weight, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS, RocksDbWeight as DbWeight}, +}; + +impl crate::WeightInfo for () { + fn report_equivocation(validator_count: u32) -> Weight { + // we take the validator set count from the membership proof to + // calculate the weight but we set a floor of 100 validators. + let validator_count = validator_count.max(100) as u64; + + // worst case we are considering is that the given offender + // is backed by 200 nominators + const MAX_NOMINATORS: u64 = 200; + + // checking membership proof + (35 * WEIGHT_PER_MICROS) + .saturating_add((175 * WEIGHT_PER_NANOS).saturating_mul(validator_count)) + .saturating_add(DbWeight::get().reads(5)) + // check equivocation proof + .saturating_add(95 * WEIGHT_PER_MICROS) + // report offence + .saturating_add(110 * WEIGHT_PER_MICROS) + .saturating_add(25 * WEIGHT_PER_MICROS * MAX_NOMINATORS) + .saturating_add(DbWeight::get().reads(14 + 3 * MAX_NOMINATORS)) + .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)) + // fetching set id -> session index mappings + .saturating_add(DbWeight::get().reads(2)) + } + + fn note_stalled() -> Weight { + (3 * WEIGHT_PER_MICROS) + .saturating_add(DbWeight::get().writes(1)) + } +} diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index 961c0994607520f15201f96df6e6c504350a94f3..9a592ab9266f1e3156c06adebe478d94f3921981 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -40,8 +40,8 @@ use fg_primitives::{ GRANDPA_ENGINE_ID, }; use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, storage, traits::KeyOwnerProofSystem, - Parameter, + decl_error, decl_event, decl_module, decl_storage, dispatch::DispatchResultWithPostInfo, + storage, traits::KeyOwnerProofSystem, weights::{Pays, Weight}, Parameter, }; use frame_system::{ensure_none, ensure_root, ensure_signed}; use pallet_finality_tracker::OnFinalizationStalled; @@ -54,6 +54,7 @@ use sp_session::{GetSessionNumber, GetValidatorCount}; use sp_staking::SessionIndex; mod equivocation; +mod default_weights; #[cfg(any(feature = "runtime-benchmarks", test))] mod benchmarking; @@ -97,6 +98,14 @@ pub trait Trait: frame_system::Trait { /// `()`) you must use this pallet's `ValidateUnsigned` in the runtime /// definition. type HandleEquivocation: HandleEquivocation; + + /// Weights for this pallet. + type WeightInfo: WeightInfo; +} + +pub trait WeightInfo { + fn report_equivocation(validator_count: u32) -> Weight; + fn note_stalled() -> Weight; } /// A stored pending change, old format. @@ -170,7 +179,7 @@ pub enum StoredState { decl_event! { pub enum Event { - /// New authority set has been applied. [authority_set] + /// New authority set has been applied. \[authority_set\] NewAuthorities(AuthorityList), /// Current authority set has been paused. Paused, @@ -242,19 +251,19 @@ decl_module! { /// equivocation proof and validate the given key ownership proof /// against the extracted offender. If both are valid, the offence /// will be reported. - #[weight = weight_for::report_equivocation::(key_owner_proof.validator_count())] + #[weight = T::WeightInfo::report_equivocation(key_owner_proof.validator_count())] fn report_equivocation( origin, equivocation_proof: EquivocationProof, key_owner_proof: T::KeyOwnerProof, - ) { + ) -> DispatchResultWithPostInfo { let reporter = ensure_signed(origin)?; Self::do_report_equivocation( Some(reporter), equivocation_proof, key_owner_proof, - )?; + ) } /// Report voter equivocation/misbehavior. 
This method will verify the @@ -266,19 +275,19 @@ decl_module! { /// block authors will call it (validated in `ValidateUnsigned`), as such /// if the block author is defined it will be defined as the equivocation /// reporter. - #[weight = weight_for::report_equivocation::(key_owner_proof.validator_count())] + #[weight = T::WeightInfo::report_equivocation(key_owner_proof.validator_count())] fn report_equivocation_unsigned( origin, equivocation_proof: EquivocationProof, key_owner_proof: T::KeyOwnerProof, - ) { + ) -> DispatchResultWithPostInfo { ensure_none(origin)?; Self::do_report_equivocation( T::HandleEquivocation::block_author(), equivocation_proof, key_owner_proof, - )?; + ) } /// Note that the current authority set of the GRANDPA finality gadget has @@ -288,7 +297,7 @@ decl_module! { /// forced change will not be re-orged (e.g. 1000 blocks). The GRANDPA voters /// will start the new authority set using the given finalized block as base. /// Only callable by root. - #[weight = weight_for::note_stalled::()] + #[weight = T::WeightInfo::note_stalled()] fn note_stalled( origin, delay: T::BlockNumber, @@ -364,45 +373,6 @@ decl_module! { } } -mod weight_for { - use frame_support::{ - traits::Get, - weights::{ - constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}, - Weight, - }, - }; - - pub fn report_equivocation(validator_count: u32) -> Weight { - // we take the validator set count from the membership proof to - // calculate the weight but we set a floor of 100 validators. - let validator_count = validator_count.min(100) as u64; - - // worst case we are considering is that the given offender - // is backed by 200 nominators - const MAX_NOMINATORS: u64 = 200; - - // checking membership proof - (35 * WEIGHT_PER_MICROS) - .saturating_add((175 * WEIGHT_PER_NANOS).saturating_mul(validator_count)) - .saturating_add(T::DbWeight::get().reads(5)) - // check equivocation proof - .saturating_add(95 * WEIGHT_PER_MICROS) - // report offence - .saturating_add(110 * WEIGHT_PER_MICROS) - .saturating_add(25 * WEIGHT_PER_MICROS * MAX_NOMINATORS) - .saturating_add(T::DbWeight::get().reads(14 + 3 * MAX_NOMINATORS)) - .saturating_add(T::DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)) - // fetching set id -> session index mappings - .saturating_add(T::DbWeight::get().reads(2)) - } - - pub fn note_stalled() -> Weight { - (3 * WEIGHT_PER_MICROS) - .saturating_add(T::DbWeight::get().writes(1)) - } -} - impl Module { /// Get the current set of authorities, along with their respective weights. pub fn grandpa_authorities() -> AuthorityList { @@ -520,7 +490,7 @@ impl Module { reporter: Option, equivocation_proof: EquivocationProof, key_owner_proof: T::KeyOwnerProof, - ) -> Result<(), Error> { + ) -> DispatchResultWithPostInfo { // we check the equivocation within the context of its set id (and // associated session) and round. we also need to know the validator // set count when the offence since it is required to calculate the @@ -585,7 +555,10 @@ impl Module { set_id, round, ), - ).map_err(|_| Error::::DuplicateOffenceReport) + ).map_err(|_| Error::::DuplicateOffenceReport)?; + + // waive the fee since the report is valid and beneficial + Ok(Pays::No.into()) } /// Submits an extrinsic to report an equivocation. 
This method will create diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 684712df7d078362a5aebc5524c4b4c286bb9fe5..215a1f0a35932284796928edbb0ff2d761360a7d 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -41,21 +41,14 @@ use sp_runtime::{ }; use sp_staking::SessionIndex; -use frame_system as system; -use pallet_balances as balances; -use pallet_offences as offences; -use pallet_session as session; -use pallet_staking as staking; -use pallet_timestamp as timestamp; - impl_outer_origin! { pub enum Origin for Test {} } impl_outer_dispatch! { pub enum Call for Test where origin: Origin { - grandpa::Grandpa, - staking::Staking, + pallet_grandpa::Grandpa, + pallet_staking::Staking, } } @@ -67,12 +60,12 @@ impl_opaque_keys! { impl_outer_event! { pub enum TestEvent for Test { - system, - balances, - grandpa, - offences, - session, - staking, + frame_system, + pallet_balances, + pallet_grandpa, + pallet_offences, + pallet_session, + pallet_staking, } } @@ -107,14 +100,14 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); - type AccountData = balances::AccountData; + type PalletInfo = (); + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); } -impl system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::SendTransactionTypes for Test where Call: From, { @@ -129,22 +122,22 @@ parameter_types! { } /// Custom `SessionHandler` since we use `TestSessionKeys` as `Keys`. -impl session::Trait for Test { +impl pallet_session::Trait for Test { type Event = TestEvent; type ValidatorId = u64; - type ValidatorIdOf = staking::StashOf; - type ShouldEndSession = session::PeriodicSessions; - type NextSessionRotation = session::PeriodicSessions; - type SessionManager = session::historical::NoteHistoricalRoot; + type ValidatorIdOf = pallet_staking::StashOf; + type ShouldEndSession = pallet_session::PeriodicSessions; + type NextSessionRotation = pallet_session::PeriodicSessions; + type SessionManager = pallet_session::historical::NoteHistoricalRoot; type SessionHandler = ::KeyTypeIdProviders; type Keys = TestSessionKeys; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type WeightInfo = (); } -impl session::historical::Trait for Test { - type FullIdentification = staking::Exposure; - type FullIdentificationOf = staking::ExposureOf; +impl pallet_session::historical::Trait for Test { + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } parameter_types! { @@ -162,7 +155,8 @@ parameter_types! { pub const ExistentialDeposit: u128 = 1; } -impl balances::Trait for Test { +impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u128; type DustRemoval = (); type Event = TestEvent; @@ -175,7 +169,7 @@ parameter_types! 
{ pub const MinimumPeriod: u64 = 3; } -impl timestamp::Trait for Test { +impl pallet_timestamp::Trait for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; @@ -218,7 +212,7 @@ impl Convert for CurrencyToVoteHandler { } } -impl staking::Trait for Test { +impl pallet_staking::Trait for Test { type RewardRemainder = (); type CurrencyToVote = CurrencyToVoteHandler; type Event = TestEvent; @@ -228,9 +222,9 @@ impl staking::Trait for Test { type SessionsPerEra = SessionsPerEra; type BondingDuration = BondingDuration; type SlashDeferDuration = SlashDeferDuration; - type SlashCancelOrigin = system::EnsureRoot; + type SlashCancelOrigin = frame_system::EnsureRoot; type SessionInterface = Self; - type UnixTime = timestamp::Module; + type UnixTime = pallet_timestamp::Module; type RewardCurve = RewardCurve; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type NextNewSession = Session; @@ -246,12 +240,11 @@ parameter_types! { pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); } -impl offences::Trait for Test { +impl pallet_offences::Trait for Test { type Event = TestEvent; - type IdentificationTuple = session::historical::IdentificationTuple; + type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; - type WeightInfo = (); } impl Trait for Test { @@ -269,9 +262,11 @@ impl Trait for Test { )>>::IdentificationTuple; type HandleEquivocation = super::EquivocationHandler; + + type WeightInfo = (); } -mod grandpa { +mod pallet_grandpa { pub use crate::Event; } @@ -331,7 +326,7 @@ pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestEx i as u64, i as u64 + 1000, 10_000, - staking::StakerStatus::::Validator, + pallet_staking::StakerStatus::::Validator, ) }) .collect(); @@ -342,18 +337,18 @@ pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestEx // NOTE: this will initialize the grandpa authorities // through OneSessionHandler::on_genesis_session - session::GenesisConfig:: { keys: session_keys } + pallet_session::GenesisConfig:: { keys: session_keys } .assimilate_storage(&mut t) .unwrap(); - balances::GenesisConfig:: { balances } + pallet_balances::GenesisConfig:: { balances } .assimilate_storage(&mut t) .unwrap(); - let staking_config = staking::GenesisConfig:: { + let staking_config = pallet_staking::GenesisConfig:: { stakers, validator_count: 8, - force_era: staking::Forcing::ForceNew, + force_era: pallet_staking::Forcing::ForceNew, minimum_validator_count: 0, invulnerables: vec![], ..Default::default() diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 9eca2cc381371ab8fddf0bd86a5b3c5c0bdd8430..4916808fe000f3f9919cb9c2f6668f40d2039477 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -26,6 +26,7 @@ use fg_primitives::ScheduledChange; use frame_support::{ assert_err, assert_ok, traits::{Currency, OnFinalize}, + weights::{GetDispatchInfo, Pays}, }; use frame_system::{EventRecord, Phase}; use pallet_session::OneSessionHandler; @@ -842,3 +843,89 @@ fn always_schedules_a_change_on_new_session_when_stalled() { assert_eq!(Grandpa::current_set_id(), 2); }); } + +#[test] +fn report_equivocation_has_valid_weight() { + // the weight depends on the size of the validator set, + // but there's a lower bound of 100 validators. 
+ assert!( + (1..=100) + .map(::WeightInfo::report_equivocation) + .collect::>() + .windows(2) + .all(|w| w[0] == w[1]) + ); + + // after 100 validators the weight should keep increasing + // with every extra validator. + assert!( + (100..=1000) + .map(::WeightInfo::report_equivocation) + .collect::>() + .windows(2) + .all(|w| w[0] < w[1]) + ); +} + +#[test] +fn valid_equivocation_reports_dont_pay_fees() { + let authorities = test_authorities(); + + new_test_ext_raw_authorities(authorities).execute_with(|| { + start_era(1); + + let equivocation_key = &Grandpa::grandpa_authorities()[0].0; + let equivocation_keyring = extract_keyring(equivocation_key); + let set_id = Grandpa::current_set_id(); + + // generate an equivocation proof. + let equivocation_proof = generate_equivocation_proof( + set_id, + (1, H256::random(), 10, &equivocation_keyring), + (1, H256::random(), 10, &equivocation_keyring), + ); + + // create the key ownership proof. + let key_owner_proof = + Historical::prove((sp_finality_grandpa::KEY_TYPE, &equivocation_key)).unwrap(); + + // check the dispatch info for the call. + let info = Call::::report_equivocation_unsigned( + equivocation_proof.clone(), + key_owner_proof.clone(), + ) + .get_dispatch_info(); + + // it should have non-zero weight and the fee has to be paid. + assert!(info.weight > 0); + assert_eq!(info.pays_fee, Pays::Yes); + + // report the equivocation. + let post_info = Grandpa::report_equivocation_unsigned( + Origin::none(), + equivocation_proof.clone(), + key_owner_proof.clone(), + ) + .unwrap(); + + // the original weight should be kept, but given that the report + // is valid the fee is waived. + assert!(post_info.actual_weight.is_none()); + assert_eq!(post_info.pays_fee, Pays::No); + + // report the equivocation again which is invalid now since it is + // duplicate. + let post_info = Grandpa::report_equivocation_unsigned( + Origin::none(), + equivocation_proof, + key_owner_proof, + ) + .err() + .unwrap() + .post_info; + + // the fee is not waived and the original weight is kept. 
+ assert!(post_info.actual_weight.is_none()); + assert_eq!(post_info.pays_fee, Pays::Yes); + }) +} diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index 99e147b3141cee0419ce0a463dae864220da813f..08777c44ad2b16b89f039fa8e1bfa4780eceba53 100644 --- a/frame/identity/Cargo.toml +++ b/frame/identity/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-identity" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME identity management pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,16 +16,16 @@ targets = ["x86_64-unknown-linux-gnu"] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/identity/README.md b/frame/identity/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8927febec6bbdc5bf7ace28039c9fde6bf693afb --- /dev/null +++ b/frame/identity/README.md @@ -0,0 +1,56 @@ +# Identity Module + +- [`identity::Trait`](https://docs.rs/pallet-identity/latest/pallet_identity/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-identity/latest/pallet_identity/enum.Call.html) + +## Overview + +A federated naming system, allowing for multiple registrars to be added from a specified origin. +Registrars can set a fee to provide identity-verification service. Anyone can put forth a +proposed identity for a fixed deposit and ask for review by any number of registrars (paying +each of their fees). Registrar judgements are given as an `enum`, allowing for sophisticated, +multi-tier opinions. + +Some judgements are identified as *sticky*, which means they cannot be removed except by +complete removal of the identity, or by the registrar. Judgements are allowed to represent a +portion of funds that have been reserved for the registrar. 
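(Editorial aside, not part of the diff: the registrar/judgement flow the README describes can be pieced together from the dispatchables exercised in the new identity benchmarks later in this diff. A minimal sketch follows, assuming a runtime `T` that configures this pallet, funded `registrar` and `user` accounts, and a pre-built `IdentityInfo`; the helper name `registrar_flow`, the registrar index 0, and the fee value 10 are purely illustrative.)

use frame_support::dispatch::DispatchResult;
use frame_system::RawOrigin;
use sp_runtime::traits::StaticLookup;
use pallet_identity::{Trait, Module as Identity, IdentityInfo, Judgement};

/// Hypothetical helper (not part of this PR): walks one identity through the
/// registrar flow described in the README, assuming `registrar` and `user`
/// are funded and `info` was built by the caller.
fn registrar_flow<T: Trait>(
    registrar: T::AccountId,
    user: T::AccountId,
    info: IdentityInfo,
) -> DispatchResult {
    // a super-user (root) adds the registrar; being the first one, it gets index 0
    Identity::<T>::add_registrar(RawOrigin::Root.into(), registrar.clone())?;
    // the registrar advertises the fee it charges per judgement
    Identity::<T>::set_fee(RawOrigin::Signed(registrar.clone()).into(), 0, 10.into())?;
    // the user reserves a deposit by setting an identity, then asks registrar 0
    // for a judgement, offering at most the registrar's fee
    Identity::<T>::set_identity(RawOrigin::Signed(user.clone()).into(), info)?;
    Identity::<T>::request_judgement(RawOrigin::Signed(user.clone()).into(), 0, 10.into())?;
    // the registrar gives its opinion; `Reasonable` is a removable judgement,
    // whereas a "sticky" one such as `KnownGood` would persist
    Identity::<T>::provide_judgement(
        RawOrigin::Signed(registrar).into(),
        0,
        T::Lookup::unlookup(user),
        Judgement::Reasonable,
    )
}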
+ +A super-user can remove accounts and in doing so, slash the deposit. + +All accounts may also have a limited number of sub-accounts which may be specified by the owner; +by definition, these have equivalent ownership and each has an individual name. + +The number of registrars should be limited, and the deposit made sufficiently large, to ensure +no state-bloat attack is viable. + +## Interface + +### Dispatchable Functions + +#### For general users +* `set_identity` - Set the associated identity of an account; a small deposit is reserved if not + already taken. +* `clear_identity` - Remove an account's associated identity; the deposit is returned. +* `request_judgement` - Request a judgement from a registrar, paying a fee. +* `cancel_request` - Cancel the previous request for a judgement. + +#### For general users with sub-identities +* `set_subs` - Set the sub-accounts of an identity. +* `add_sub` - Add a sub-identity to an identity. +* `remove_sub` - Remove a sub-identity of an identity. +* `rename_sub` - Rename a sub-identity of an identity. +* `quit_sub` - Remove a sub-identity of an identity (called by the sub-identity). + +#### For registrars +* `set_fee` - Set the fee required to be paid for a judgement to be given by the registrar. +* `set_fields` - Set the fields that a registrar cares about in their judgements. +* `provide_judgement` - Provide a judgement to an identity. + +#### For super-users +* `add_registrar` - Add a new registrar to the system. +* `kill_identity` - Forcibly remove the associated identity; the deposit is lost. + +[`Call`]: ./enum.Call.html +[`Trait`]: ./trait.Trait.html + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index 8b0cb0c27cf0e1c98312fcbaf3ceee613ade5af7..d39df27017b7173e2f1b7f8cd2064bcb138c514c 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -21,30 +21,34 @@ use super::*; -use frame_system::RawOrigin; -use sp_io::hashing::blake2_256; -use frame_benchmarking::benchmarks; +use frame_system::{EventRecord, RawOrigin}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use sp_runtime::traits::Bounded; use crate::Module as Identity; -// Support Functions -fn account(name: &'static str, index: u32) -> T::AccountId { - let entropy = (name, index).using_encoded(blake2_256); - T::AccountId::decode(&mut &entropy[..]).unwrap_or_default() +const SEED: u32 = 0; + +fn assert_last_event(generic_event: ::Event) { + let events = frame_system::Module::::events(); + let system_event: ::Event = generic_event.into(); + // compare to the last event record + let EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); } // Adds `r` registrars to the Identity Pallet. These registrars will have set fees and fields. 
fn add_registrars(r: u32) -> Result<(), &'static str> { for i in 0..r { - let _ = T::Currency::make_free_balance_be(&account::("registrar", i), BalanceOf::::max_value()); - Identity::::add_registrar(RawOrigin::Root.into(), account::("registrar", i))?; - Identity::::set_fee(RawOrigin::Signed(account::("registrar", i)).into(), i.into(), 10.into())?; + let registrar: T::AccountId = account("registrar", i, SEED); + let _ = T::Currency::make_free_balance_be(®istrar, BalanceOf::::max_value()); + Identity::::add_registrar(RawOrigin::Root.into(), registrar.clone())?; + Identity::::set_fee(RawOrigin::Signed(registrar.clone()).into(), i.into(), 10.into())?; let fields = IdentityFields( IdentityField::Display | IdentityField::Legal | IdentityField::Web | IdentityField::Riot | IdentityField::Email | IdentityField::PgpFingerprint | IdentityField::Image | IdentityField::Twitter ); - Identity::::set_fields(RawOrigin::Signed(account::("registrar", i)).into(), i.into(), fields)?; + Identity::::set_fields(RawOrigin::Signed(registrar.clone()).into(), i.into(), fields)?; } assert_eq!(Registrars::::get().len(), r as usize); @@ -59,7 +63,7 @@ fn create_sub_accounts(who: &T::AccountId, s: u32) -> Result("sub", i); + let sub_account = account("sub", i, SEED); subs.push((sub_account, data.clone())); } @@ -110,13 +114,13 @@ benchmarks! { let p in 1 .. T::MaxSubAccounts::get() => (); let s in 1 .. T::MaxSubAccounts::get() => { // Give them s many sub accounts - let caller = account::("caller", 0); + let caller: T::AccountId = whitelisted_caller(); let _ = add_sub_accounts::(&caller, s)?; }; let x in 1 .. T::MaxAdditionalFields::get() => { // Create their main identity with x additional fields let info = create_identity_info::(x); - let caller = account::("caller", 0); + let caller: T::AccountId = whitelisted_caller(); let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); Identity::::set_identity(caller_origin, info)?; }; @@ -124,7 +128,11 @@ benchmarks! { add_registrar { let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - }: _(RawOrigin::Root, account::("registrar", r + 1)) + ensure!(Registrars::::get().len() as u32 == r, "Registrars not set up correctly."); + }: _(RawOrigin::Root, account("registrar", r + 1, SEED)) + verify { + ensure!(Registrars::::get().len() as u32 == r + 1, "Registrars not added."); + } set_identity { let r in ...; @@ -133,7 +141,7 @@ benchmarks! { let x in _ .. _ => (); let caller = { // The target user - let caller = account::("caller", 0); + let caller: T::AccountId = whitelisted_caller(); let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -146,7 +154,7 @@ benchmarks! { for i in 0..r { Identity::::request_judgement(caller_origin.clone(), i, 10.into())?; Identity::::provide_judgement( - RawOrigin::Signed(account::("registrar", i)).into(), + RawOrigin::Signed(account("registrar", i, SEED)).into(), i, caller_lookup.clone(), Judgement::Reasonable @@ -154,68 +162,44 @@ benchmarks! { } caller }; - }: _( - RawOrigin::Signed(caller), - create_identity_info::(x) - ) - - set_subs { - let caller = account::("caller", 0); + }: _(RawOrigin::Signed(caller.clone()), create_identity_info::(x)) + verify { + assert_last_event::(Event::::IdentitySet(caller).into()); + } - // Give them p many previous sub accounts. - let p in 1 .. 
T::MaxSubAccounts::get() => { - let _ = add_sub_accounts::(&caller, p)?; - }; + // We need to split `set_subs` into two benchmarks to accurately isolate the potential + // writes caused by new or old sub accounts. The actual weight should simply be + // the sum of these two weights. + set_subs_new { + let caller: T::AccountId = whitelisted_caller(); // Create a new subs vec with s sub accounts let s in 1 .. T::MaxSubAccounts::get() => (); let subs = create_sub_accounts::(&caller, s)?; + ensure!(SubsOf::::get(&caller).1.len() == 0, "Caller already has subs"); + }: set_subs(RawOrigin::Signed(caller.clone()), subs) + verify { + ensure!(SubsOf::::get(&caller).1.len() as u32 == s, "Subs not added"); + } - }: _(RawOrigin::Signed(caller), subs) - - add_sub { - let caller = account::("caller", 0); - + set_subs_old { + let caller: T::AccountId = whitelisted_caller(); // Give them p many previous sub accounts. - let p in 1 .. T::MaxSubAccounts::get() - 1 => { + let p in 1 .. T::MaxSubAccounts::get() => { let _ = add_sub_accounts::(&caller, p)?; }; - let sub = account::("new_sub", 0); - let data = Data::Raw(vec![0; 32]); - }: _(RawOrigin::Signed(caller), T::Lookup::unlookup(sub), data) - - rename_sub { - let caller = account::("caller", 0); - - let p in 1 .. T::MaxSubAccounts::get(); - - // Give them p many previous sub accounts. - let (sub, _) = add_sub_accounts::(&caller, p)?.remove(0); - let data = Data::Raw(vec![1; 32]); - - }: _(RawOrigin::Signed(caller), T::Lookup::unlookup(sub), data) - - remove_sub { - let caller = account::("caller", 0); - - // Give them p many previous sub accounts. - let p in 1 .. T::MaxSubAccounts::get(); - let (sub, _) = add_sub_accounts::(&caller, p)?.remove(0); - }: _(RawOrigin::Signed(caller), T::Lookup::unlookup(sub)) - - quit_sub { - let caller = account::("caller", 0); - let sup = account::("super", 0); - - // Give them p many previous sub accounts. - let p in 1 .. T::MaxSubAccounts::get() - 1 => { - let _ = add_sub_accounts::(&sup, p)?; - }; - let sup_origin = RawOrigin::Signed(sup).into(); - Identity::::add_sub(sup_origin, T::Lookup::unlookup(caller.clone()), Data::Raw(vec![0; 32]))?; - }: _(RawOrigin::Signed(caller)) + // Remove all subs. + let subs = create_sub_accounts::(&caller, 0)?; + ensure!( + SubsOf::::get(&caller).1.len() as u32 == p, + "Caller does have subs", + ); + }: set_subs(RawOrigin::Signed(caller.clone()), subs) + verify { + ensure!(SubsOf::::get(&caller).1.len() == 0, "Subs not removed"); + } clear_identity { - let caller = account::("caller", 0); + let caller: T::AccountId = whitelisted_caller(); let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); let caller_lookup = ::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -228,24 +212,31 @@ benchmarks! 
{ for i in 0..r { Identity::::request_judgement(caller_origin.clone(), i, 10.into())?; Identity::::provide_judgement( - RawOrigin::Signed(account::("registrar", i)).into(), + RawOrigin::Signed(account("registrar", i, SEED)).into(), i, caller_lookup.clone(), Judgement::Reasonable )?; } - }: _(RawOrigin::Signed(caller)) + ensure!(IdentityOf::::contains_key(&caller), "Identity does not exist."); + }: _(RawOrigin::Signed(caller.clone())) + verify { + ensure!(!IdentityOf::::contains_key(&caller), "Identity not cleared."); + } request_judgement { - let caller = account::("caller", 0); + let caller: T::AccountId = whitelisted_caller(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in ...; let x in ...; - }: _(RawOrigin::Signed(caller), r - 1, 10.into()) + }: _(RawOrigin::Signed(caller.clone()), r - 1, 10.into()) + verify { + assert_last_event::(Event::::JudgementRequested(caller, r-1).into()); + } cancel_request { - let caller = account::("caller", 0); + let caller: T::AccountId = whitelisted_caller(); let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -253,27 +244,42 @@ benchmarks! { let x in ...; Identity::::request_judgement(caller_origin, r - 1, 10.into())?; - }: _(RawOrigin::Signed(caller), r - 1) + }: _(RawOrigin::Signed(caller.clone()), r - 1) + verify { + assert_last_event::(Event::::JudgementUnrequested(caller, r-1).into()); + } set_fee { - let caller = account::("caller", 0); + let caller: T::AccountId = whitelisted_caller(); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; - }: _(RawOrigin::Signed(caller), r, 10.into()) + let registrars = Registrars::::get(); + ensure!(registrars[r as usize].as_ref().unwrap().fee == 0.into(), "Fee already set."); + }: _(RawOrigin::Signed(caller), r, 100.into()) + verify { + let registrars = Registrars::::get(); + ensure!(registrars[r as usize].as_ref().unwrap().fee == 100.into(), "Fee not changed."); + } set_account_id { - let caller = account::("caller", 0); + let caller: T::AccountId = whitelisted_caller(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; - }: _(RawOrigin::Signed(caller), r, account::("new", 0)) + let registrars = Registrars::::get(); + ensure!(registrars[r as usize].as_ref().unwrap().account == caller.clone(), "id not set."); + }: _(RawOrigin::Signed(caller), r, account("new", 0, SEED)) + verify { + let registrars = Registrars::::get(); + ensure!(registrars[r as usize].as_ref().unwrap().account == account("new", 0, SEED), "id not changed."); + } set_fields { - let caller = account::("caller", 0); + let caller: T::AccountId = whitelisted_caller(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; @@ -283,16 +289,22 @@ benchmarks! 
{ IdentityField::Display | IdentityField::Legal | IdentityField::Web | IdentityField::Riot | IdentityField::Email | IdentityField::PgpFingerprint | IdentityField::Image | IdentityField::Twitter ); + let registrars = Registrars::::get(); + ensure!(registrars[r as usize].as_ref().unwrap().fields == Default::default(), "fields already set."); }: _(RawOrigin::Signed(caller), r, fields) + verify { + let registrars = Registrars::::get(); + ensure!(registrars[r as usize].as_ref().unwrap().fields != Default::default(), "fields not set."); + } provide_judgement { // The user - let user = account::("user", r); + let user: T::AccountId = account("user", r, SEED); let user_origin = ::Origin::from(RawOrigin::Signed(user.clone())); let user_lookup = ::unlookup(user.clone()); let _ = T::Currency::make_free_balance_be(&user, BalanceOf::::max_value()); - let caller = account::("caller", 0); + let caller: T::AccountId = whitelisted_caller(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; @@ -305,28 +317,91 @@ benchmarks! { Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; Identity::::request_judgement(user_origin.clone(), r, 10.into())?; }: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable) + verify { + assert_last_event::(Event::::JudgementGiven(user, r).into()) + } kill_identity { - let caller = account::("caller", 0); - let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); - let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); - let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in ...; - let s in ...; - let x in ...; + // Setting up our own account below. + let s in _ .. _ => {}; + let x in _ .. _ => {}; + + let target: T::AccountId = account("target", 0, SEED); + let target_origin: ::Origin = RawOrigin::Signed(target.clone()).into(); + let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); + let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); + + let info = create_identity_info::(x); + Identity::::set_identity(target_origin.clone(), info)?; + let _ = add_sub_accounts::(&target, s)?; // User requests judgement from all the registrars, and they approve for i in 0..r { - Identity::::request_judgement(caller_origin.clone(), i, 10.into())?; + Identity::::request_judgement(target_origin.clone(), i, 10.into())?; Identity::::provide_judgement( - RawOrigin::Signed(account::("registrar", i)).into(), + RawOrigin::Signed(account("registrar", i, SEED)).into(), i, - caller_lookup.clone(), + target_lookup.clone(), Judgement::Reasonable )?; } - }: _(RawOrigin::Root, caller_lookup) + ensure!(IdentityOf::::contains_key(&target), "Identity not set"); + }: _(RawOrigin::Root, target_lookup) + verify { + ensure!(!IdentityOf::::contains_key(&target), "Identity not removed"); + } + + add_sub { + let s in 1 .. T::MaxSubAccounts::get() - 1; + + let caller: T::AccountId = whitelisted_caller(); + let _ = add_sub_accounts::(&caller, s)?; + let sub = account("new_sub", 0, SEED); + let data = Data::Raw(vec![0; 32]); + ensure!(SubsOf::::get(&caller).1.len() as u32 == s, "Subs not set."); + }: _(RawOrigin::Signed(caller.clone()), T::Lookup::unlookup(sub), data) + verify { + ensure!(SubsOf::::get(&caller).1.len() as u32 == s + 1, "Subs not added."); + } + + rename_sub { + let s in 1 .. 
T::MaxSubAccounts::get(); + + let caller: T::AccountId = whitelisted_caller(); + let (sub, _) = add_sub_accounts::(&caller, s)?.remove(0); + let data = Data::Raw(vec![1; 32]); + ensure!(SuperOf::::get(&sub).unwrap().1 != data, "data already set"); + }: _(RawOrigin::Signed(caller), T::Lookup::unlookup(sub.clone()), data.clone()) + verify { + ensure!(SuperOf::::get(&sub).unwrap().1 == data, "data not set"); + } + + remove_sub { + let s in 1 .. T::MaxSubAccounts::get(); + + let caller: T::AccountId = whitelisted_caller(); + let (sub, _) = add_sub_accounts::(&caller, s)?.remove(0); + ensure!(SuperOf::::contains_key(&sub), "Sub doesn't exists"); + }: _(RawOrigin::Signed(caller), T::Lookup::unlookup(sub.clone())) + verify { + ensure!(!SuperOf::::contains_key(&sub), "Sub not removed"); + } + + quit_sub { + let s in 1 .. T::MaxSubAccounts::get() - 1; + + let caller: T::AccountId = whitelisted_caller(); + let sup = account("super", 0, SEED); + let _ = add_sub_accounts::(&sup, s)?; + let sup_origin = RawOrigin::Signed(sup).into(); + Identity::::add_sub(sup_origin, T::Lookup::unlookup(caller.clone()), Data::Raw(vec![0; 32]))?; + ensure!(SuperOf::::contains_key(&caller), "Sub doesn't exists"); + }: _(RawOrigin::Signed(caller.clone())) + verify { + ensure!(!SuperOf::::contains_key(&caller), "Sub not removed"); + } + } #[cfg(test)] @@ -340,7 +415,8 @@ mod tests { new_test_ext().execute_with(|| { assert_ok!(test_benchmark_add_registrar::()); assert_ok!(test_benchmark_set_identity::()); - assert_ok!(test_benchmark_set_subs::()); + assert_ok!(test_benchmark_set_subs_new::()); + assert_ok!(test_benchmark_set_subs_old::()); assert_ok!(test_benchmark_clear_identity::()); assert_ok!(test_benchmark_request_judgement::()); assert_ok!(test_benchmark_cancel_request::()); @@ -349,6 +425,10 @@ mod tests { assert_ok!(test_benchmark_set_fields::()); assert_ok!(test_benchmark_provide_judgement::()); assert_ok!(test_benchmark_kill_identity::()); + assert_ok!(test_benchmark_add_sub::()); + assert_ok!(test_benchmark_rename_sub::()); + assert_ok!(test_benchmark_remove_sub::()); + assert_ok!(test_benchmark_quit_sub::()); }); } } diff --git a/frame/identity/src/default_weights.rs b/frame/identity/src/default_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..93b1c89ab93ddd6d1ab5b74917df3b79ab0e6095 --- /dev/null +++ b/frame/identity/src/default_weights.rs @@ -0,0 +1,135 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn add_registrar(r: u32, ) -> Weight { + (39_603_000 as Weight) + .saturating_add((418_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_identity(r: u32, x: u32, ) -> Weight { + (110_679_000 as Weight) + .saturating_add((389_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_985_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_subs_new(s: u32, ) -> Weight { + (78_697_000 as Weight) + .saturating_add((15_225_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(DbWeight::get().writes(1 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + fn set_subs_old(p: u32, ) -> Weight { + (71_308_000 as Weight) + .saturating_add((5_772_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + } + fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { + (91_553_000 as Weight) + .saturating_add((284_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((5_749_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_621_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + fn request_judgement(r: u32, x: u32, ) -> Weight { + (110_856_000 as Weight) + .saturating_add((496_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((3_221_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn cancel_request(r: u32, x: u32, ) -> Weight { + (96_857_000 as Weight) + .saturating_add((311_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((3_204_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_fee(r: u32, ) -> Weight { + (16_276_000 as Weight) + .saturating_add((381_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_account_id(r: u32, ) -> Weight { + (18_530_000 as Weight) + .saturating_add((391_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_fields(r: u32, ) -> Weight { + (16_359_000 as Weight) + .saturating_add((379_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn provide_judgement(r: u32, x: u32, ) -> Weight { + (72_869_000 as Weight) + .saturating_add((423_000 as 
Weight).saturating_mul(r as Weight)) + .saturating_add((3_187_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { + (123_199_000 as Weight) + .saturating_add((71_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((5_730_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + fn add_sub(s: u32, ) -> Weight { + (110_070_000 as Weight) + .saturating_add((262_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn rename_sub(s: u32, ) -> Weight { + (37_130_000 as Weight) + .saturating_add((79_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_sub(s: u32, ) -> Weight { + (103_295_000 as Weight) + .saturating_add((235_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn quit_sub(s: u32, ) -> Weight { + (65_716_000 as Weight) + .saturating_add((227_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 1607835f2414b0daef7029ac91d99b7e24db5b1d..1ff69af9a903635de9b235fafd045db4e16415a3 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -86,7 +86,10 @@ use frame_support::{ }; use frame_system::ensure_signed; +#[cfg(test)] +mod tests; mod benchmarking; +mod default_weights; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; @@ -94,7 +97,12 @@ type NegativeImbalanceOf = <::Currency as Currency< Weight; fn set_identity(r: u32, x: u32, ) -> Weight; - fn set_subs(p: u32, s: u32, ) -> Weight; + fn set_subs_new(s: u32, ) -> Weight; + fn set_subs_old(p: u32, ) -> Weight; + fn add_sub(p: u32, ) -> Weight; + fn rename_sub(p: u32, ) -> Weight; + fn remove_sub(p: u32, ) -> Weight; + fn quit_sub(p: u32, ) -> Weight; fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight; fn request_judgement(r: u32, x: u32, ) -> Weight; fn cancel_request(r: u32, x: u32, ) -> Weight; @@ -103,28 +111,6 @@ pub trait WeightInfo { fn set_fields(r: u32, ) -> Weight; fn provide_judgement(r: u32, x: u32, ) -> Weight; fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight; - fn add_sub(p: u32, ) -> Weight; - fn rename_sub() -> Weight; - fn remove_sub(p: u32, ) -> Weight; - fn quit_sub(p: u32, ) -> Weight; -} - -impl WeightInfo for () { - fn add_registrar(_r: u32, ) -> Weight { 1_000_000_000 } - fn set_identity(_r: u32, _x: u32, ) -> Weight { 1_000_000_000 } - fn set_subs(_p: u32, _s: u32, ) -> Weight { 1_000_000_000 } - fn clear_identity(_r: u32, _s: u32, _x: u32, ) -> Weight { 1_000_000_000 } - fn request_judgement(_r: u32, _x: u32, ) -> Weight { 1_000_000_000 } - fn cancel_request(_r: u32, _x: u32, ) -> Weight { 1_000_000_000 } - fn set_fee(_r: u32, ) -> Weight { 1_000_000_000 } - fn 
set_account_id(_r: u32, ) -> Weight { 1_000_000_000 } - fn set_fields(_r: u32, ) -> Weight { 1_000_000_000 } - fn provide_judgement(_r: u32, _x: u32, ) -> Weight { 1_000_000_000 } - fn kill_identity(_r: u32, _s: u32, _x: u32, ) -> Weight { 1_000_000_000 } - fn add_sub(_p: u32, ) -> Weight { 1_000_000_000 } - fn rename_sub() -> Weight { 1_000_000_000 } - fn remove_sub(_p: u32, ) -> Weight { 1_000_000_000 } - fn quit_sub(_p: u32, ) -> Weight { 1_000_000_000 } } pub trait Trait: frame_system::Trait { @@ -462,27 +448,27 @@ decl_storage! { decl_event!( pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { - /// A name was set or reset (which will remove all judgements). [who] + /// A name was set or reset (which will remove all judgements). \[who\] IdentitySet(AccountId), - /// A name was cleared, and the given balance returned. [who, deposit] + /// A name was cleared, and the given balance returned. \[who, deposit\] IdentityCleared(AccountId, Balance), - /// A name was removed and the given balance slashed. [who, deposit] + /// A name was removed and the given balance slashed. \[who, deposit\] IdentityKilled(AccountId, Balance), - /// A judgement was asked from a registrar. [who, registrar_index] + /// A judgement was asked from a registrar. \[who, registrar_index\] JudgementRequested(AccountId, RegistrarIndex), - /// A judgement request was retracted. [who, registrar_index] + /// A judgement request was retracted. \[who, registrar_index\] JudgementUnrequested(AccountId, RegistrarIndex), - /// A judgement was given by a registrar. [target, registrar_index] + /// A judgement was given by a registrar. \[target, registrar_index\] JudgementGiven(AccountId, RegistrarIndex), - /// A registrar was added. [registrar_index] + /// A registrar was added. \[registrar_index\] RegistrarAdded(RegistrarIndex), - /// A sub-identity was added to an identity and the deposit paid. [sub, main, deposit] + /// A sub-identity was added to an identity and the deposit paid. \[sub, main, deposit\] SubIdentityAdded(AccountId, AccountId, Balance), /// A sub-identity was removed from an identity and the deposit freed. - /// [sub, main, deposit] + /// \[sub, main, deposit\] SubIdentityRemoved(AccountId, AccountId, Balance), /// A sub-identity was cleared, and the given deposit repatriated from the - /// main identity account to the sub-identity account. [sub, main, deposit] + /// main identity account to the sub-identity account. \[sub, main, deposit\] SubIdentityRevoked(AccountId, AccountId, Balance), } ); @@ -525,161 +511,6 @@ decl_error! { } } -/// Functions for calcuating the weight of dispatchables. -mod weight_for { - use frame_support::{traits::Get, weights::Weight}; - use super::Trait; - - /// Weight calculation for `add_registrar`. - /// - /// Based on benchmark: - /// 22.24 + R * 0.371 µs (min squares analysis) - pub(crate) fn add_registrar( - registrars: Weight - ) -> Weight { - T::DbWeight::get().reads_writes(1, 1) - + 23_000_000 // constant - + 380_000 * registrars // R - } - - /// Weight calculation for `set_identity`. - /// - /// Based on benchmark: - /// 50.64 + R * 0.215 + X * 1.424 µs (min squares analysis) - pub(crate) fn set_identity( - judgements: Weight, - extra_fields: Weight - ) -> Weight { - T::DbWeight::get().reads_writes(1, 1) - + 51_000_000 // constant - + 220_000 * judgements // R - + 1_500_000 * extra_fields // X - } - - /// Weight calculation for `set_subs`. 
- /// - /// Based on benchmark: - /// 36.21 + P * 2.481 + S * 3.633 µs (min squares analysis) - pub(crate) fn set_subs( - old_subs: Weight, - subs: Weight - ) -> Weight { - let db = T::DbWeight::get(); - db.reads(1) // storage-exists (`IdentityOf::contains_key`) - .saturating_add(db.reads_writes(1, old_subs)) // `SubsOf::get` read + P old DB deletions - .saturating_add(db.writes(subs + 1)) // S + 1 new DB writes - .saturating_add(37_000_000) // constant - .saturating_add(2_500_000 * old_subs) // P - .saturating_add(subs.saturating_mul(3_700_000)) // S - } - - /// Weight calculation for `clear_identity`. - /// - /// Based on benchmark: - /// 43.19 + R * 0.099 + S * 2.547 + X * 0.875 µs (min squares analysis) - pub(crate) fn clear_identity( - judgements: Weight, - subs: Weight, - extra_fields: Weight - ) -> Weight { - T::DbWeight::get().reads_writes(2, subs + 2) // S + 2 deletions - + 44_000_000 // constant - + 100_000 * judgements // R - + 2_600_000 * subs // S - + 900_000 * extra_fields // X - } - - /// Weight calculation for `request_judgement`. - /// - /// Based on benchmark: - /// 51.51 + R * 0.32 + X * 1.85 µs (min squares analysis) - pub(crate) fn request_judgement( - judgements: Weight, - extra_fields: Weight - ) -> Weight { - T::DbWeight::get().reads_writes(2, 1) - + 52_000_000 // constant - + 400_000 * judgements // R - + 1_900_000 * extra_fields // X - } - - /// Weight calculation for `cancel_request`. - /// - /// Based on benchmark: - /// 40.95 + R * 0.219 + X * 1.655 µs (min squares analysis) - pub(crate) fn cancel_request( - judgements: Weight, - extra_fields: Weight - ) -> Weight { - T::DbWeight::get().reads_writes(1, 1) - + 41_000_000 // constant - + 300_000 * judgements // R - + 1_700_000 * extra_fields // X - } - - /// Weight calculation for `provide_judgement`. - /// - /// Based on benchmark: - /// 40.77 + R * 0.282 + X * 1.66 µs (min squares analysis) - pub(crate) fn provide_judgement( - judgements: Weight, - extra_fields: Weight - ) -> Weight { - T::DbWeight::get().reads_writes(2, 1) - + 41_000_000 // constant - + 300_000 * judgements // R - + 1_700_000 * extra_fields// X - } - - /// Weight calculation for `kill_identity`. - /// - /// Based on benchmark: - /// 83.96 + R * 0.122 + S * 2.533 + X * 0.867 µs (min squares analysis) - pub(crate) fn kill_identity( - judgements: Weight, - subs: Weight, - extra_fields: Weight - ) -> Weight { - let db = T::DbWeight::get(); - db.reads_writes(2, subs + 2) // 2 `take`s + S deletions - + db.reads_writes(1, 1) // balance ops - + 84_000_000 // constant - + 130_000 * judgements // R - + 2_600_000 * subs // S - + 900_000 * extra_fields // X - } - - /// Weight calculation for `add_sub`. - pub(crate) fn add_sub( - subs: Weight, - ) -> Weight { - let db = T::DbWeight::get(); - db.reads_writes(4, 3) + 124_000_000 + 156_000 * subs - } - - /// Weight calculation for `rename_sub`. - pub(crate) fn rename_sub() -> Weight { - let db = T::DbWeight::get(); - db.reads_writes(2, 1) + 30_000_000 - } - - /// Weight calculation for `remove_sub`. - pub(crate) fn remove_sub( - subs: Weight, - ) -> Weight { - let db = T::DbWeight::get(); - db.reads_writes(4, 3) + 86_000_000 + 50_000 * subs - } - - /// Weight calculation for `quit_sub`. - pub(crate) fn quit_sub( - subs: Weight, - ) -> Weight { - let db = T::DbWeight::get(); - db.reads_writes(3, 2) + 63_000_000 + 230_000 * subs - } -} - decl_module! { /// Identity module declaration. pub struct Module for enum Call where origin: T::Origin { @@ -722,7 +553,7 @@ decl_module! 
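The hand-written `weight_for` module removed above encoded each dispatchable's cost as a fitted constant plus per-item slopes and database operations; the generated `WeightInfo` functions in `default_weights.rs` follow the same additive shape. Below is a minimal, self-contained sketch of that model using the `add_registrar` coefficients from the generated file; the `DB_READ`/`DB_WRITE` constants are illustrative placeholders, not the real `RocksDbWeight` values.

```rust
/// Simplified stand-in for frame_support's `Weight` type.
type Weight = u64;

// Placeholder per-operation DB costs; the runtime takes the real values from
// `T::DbWeight` / `RocksDbWeight`, so these numbers are assumptions.
const DB_READ: Weight = 25_000_000;
const DB_WRITE: Weight = 100_000_000;

/// Mirrors the generated `add_registrar` weight: base + slope * r + 1 read + 1 write.
fn add_registrar_weight(r: u32) -> Weight {
    39_603_000u64
        .saturating_add(418_000u64.saturating_mul(r as u64))
        .saturating_add(DB_READ)
        .saturating_add(DB_WRITE)
}

fn main() {
    // With 10 registrars already present, the call is charged roughly:
    println!("add_registrar(10) ~ {} weight units", add_registrar_weight(10));
}
```

At dispatch time the extrinsic is charged for the worst case (`T::MaxRegistrars::get()`) up front, and the difference is refunded by returning the actual count, as the `Ok(Some(T::WeightInfo::add_registrar(registrar_count as u32)).into())` line in the hunk below shows.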
{ /// - One storage mutation (codec `O(R)`). /// - One event. /// # - #[weight = weight_for::add_registrar::(T::MaxRegistrars::get().into()) ] + #[weight = T::WeightInfo::add_registrar(T::MaxRegistrars::get()) ] fn add_registrar(origin, account: T::AccountId) -> DispatchResultWithPostInfo { T::RegistrarOrigin::ensure_origin(origin)?; @@ -738,7 +569,7 @@ decl_module! { Self::deposit_event(RawEvent::RegistrarAdded(i)); - Ok(Some(weight_for::add_registrar::(registrar_count as Weight)).into()) + Ok(Some(T::WeightInfo::add_registrar(registrar_count as u32)).into()) } /// Set an account's identity information and reserve the appropriate deposit. @@ -760,7 +591,7 @@ decl_module! { /// - One storage mutation (codec-read `O(X' + R)`, codec-write `O(X + R)`). /// - One event. /// # - #[weight = weight_for::set_identity::( + #[weight = T::WeightInfo::set_identity( T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X )] @@ -789,13 +620,13 @@ decl_module! { let _ = T::Currency::unreserve(&sender, old_deposit - id.deposit); } - let judgements = id.judgements.len() as Weight; + let judgements = id.judgements.len(); >::insert(&sender, id); Self::deposit_event(RawEvent::IdentitySet(sender)); - Ok(Some(weight_for::set_identity::( - judgements, // R - extra_fields as Weight // X + Ok(Some(T::WeightInfo::set_identity( + judgements as u32, // R + extra_fields // X )).into()) } @@ -820,10 +651,15 @@ decl_module! { /// - One storage write (codec complexity `O(S)`). /// - One storage-exists (`IdentityOf::contains_key`). /// # - #[weight = weight_for::set_subs::( - T::MaxSubAccounts::get().into(), // P - subs.len() as Weight // S - )] + // TODO: This whole extrinsic screams "not optimized". For example we could + // filter any overlap between new and old subs, and avoid reading/writing + // to those values... We could also ideally avoid needing to write to + // N storage items for N sub accounts. Right now the weight on this function + // is a large overestimate due to the fact that it could potentially write + // to 2 x T::MaxSubAccounts::get(). + #[weight = T::WeightInfo::set_subs_old(T::MaxSubAccounts::get()) // P: Assume max sub accounts removed. + .saturating_add(T::WeightInfo::set_subs_new(subs.len() as u32)) // S: Assume all subs are new. + ] fn set_subs(origin, subs: Vec<(T::AccountId, Data)>) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; ensure!(>::contains_key(&sender), Error::::NotFound); @@ -837,11 +673,10 @@ decl_module! { if old_deposit < new_deposit { T::Currency::reserve(&sender, new_deposit - old_deposit)?; - } - // do nothing if they're equal. - if old_deposit > new_deposit { + } else if old_deposit > new_deposit { let _ = T::Currency::unreserve(&sender, old_deposit - new_deposit); } + // do nothing if they're equal. for s in old_ids.iter() { >::remove(s); @@ -850,7 +685,7 @@ decl_module! { >::insert(&id, (sender.clone(), name)); id }).collect::>(); - let new_subs = ids.len() as Weight; + let new_subs = ids.len(); if ids.is_empty() { >::remove(&sender); @@ -858,10 +693,10 @@ decl_module! { >::insert(&sender, (new_deposit, ids)); } - Ok(Some(weight_for::set_subs::( - old_ids.len() as Weight, // P - new_subs // S - )).into()) + Ok(Some( + T::WeightInfo::set_subs_old(old_ids.len() as u32) // P: Real number of old accounts removed. + .saturating_add(T::WeightInfo::set_subs_new(new_subs as u32)) // S: New subs added. + ).into()) } /// Clear an account's identity info and all sub-accounts and return all deposits. @@ -882,7 +717,7 @@ decl_module! 
{ /// - `2` storage reads and `S + 2` storage deletions. /// - One event. /// # - #[weight = weight_for::clear_identity::( + #[weight = T::WeightInfo::clear_identity( T::MaxRegistrars::get().into(), // R T::MaxSubAccounts::get().into(), // S T::MaxAdditionalFields::get().into(), // X @@ -901,10 +736,10 @@ decl_module! { Self::deposit_event(RawEvent::IdentityCleared(sender, deposit)); - Ok(Some(weight_for::clear_identity::( - id.judgements.len() as Weight, // R - sub_ids.len() as Weight, // S - id.info.additional.len() as Weight // X + Ok(Some(T::WeightInfo::clear_identity( + id.judgements.len() as u32, // R + sub_ids.len() as u32, // S + id.info.additional.len() as u32 // X )).into()) } @@ -931,7 +766,7 @@ decl_module! { /// - Storage: 1 read `O(R)`, 1 mutate `O(X + R)`. /// - One event. /// # - #[weight = weight_for::request_judgement::( + #[weight = T::WeightInfo::request_judgement( T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X )] @@ -958,13 +793,16 @@ decl_module! { T::Currency::reserve(&sender, registrar.fee)?; - let judgements = id.judgements.len() as Weight; - let extra_fields = id.info.additional.len() as Weight; + let judgements = id.judgements.len(); + let extra_fields = id.info.additional.len(); >::insert(&sender, id); Self::deposit_event(RawEvent::JudgementRequested(sender, reg_index)); - Ok(Some(weight_for::request_judgement::(judgements, extra_fields)).into()) + Ok(Some(T::WeightInfo::request_judgement( + judgements as u32, + extra_fields as u32, + )).into()) } /// Cancel a previous request. @@ -984,7 +822,7 @@ decl_module! { /// - One storage mutation `O(R + X)`. /// - One event /// # - #[weight = weight_for::cancel_request::( + #[weight = T::WeightInfo::cancel_request( T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X )] @@ -1001,13 +839,16 @@ decl_module! { }; let _ = T::Currency::unreserve(&sender, fee); - let judgements = id.judgements.len() as Weight; - let extra_fields = id.info.additional.len() as Weight; + let judgements = id.judgements.len(); + let extra_fields = id.info.additional.len(); >::insert(&sender, id); Self::deposit_event(RawEvent::JudgementUnrequested(sender, reg_index)); - Ok(Some(weight_for::request_judgement::(judgements, extra_fields)).into()) + Ok(Some(T::WeightInfo::cancel_request( + judgements as u32, + extra_fields as u32 + )).into()) } /// Set the fee required for a judgement to be requested from a registrar. @@ -1023,10 +864,7 @@ decl_module! { /// - One storage mutation `O(R)`. /// - Benchmark: 7.315 + R * 0.329 µs (min squares analysis) /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) - + 7_400_000 // constant - + 330_000 * T::MaxRegistrars::get() as Weight // R - ] + #[weight = T::WeightInfo::set_fee(T::MaxRegistrars::get())] // R fn set_fee(origin, #[compact] index: RegistrarIndex, #[compact] fee: BalanceOf, @@ -1040,9 +878,7 @@ decl_module! { .ok_or_else(|| DispatchError::from(Error::::InvalidIndex))?; Ok(rs.len()) })?; - Ok(Some(T::DbWeight::get().reads_writes(1, 1) - + 7_400_000 + 330_000 * registrars as Weight // R - ).into()) + Ok(Some(T::WeightInfo::set_fee(registrars as u32)).into()) // R } /// Change the account associated with a registrar. @@ -1058,10 +894,7 @@ decl_module! { /// - One storage mutation `O(R)`. 
/// - Benchmark: 8.823 + R * 0.32 µs (min squares analysis) /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) - + 8_900_000 // constant - + 320_000 * T::MaxRegistrars::get() as Weight // R - ] + #[weight = T::WeightInfo::set_account_id(T::MaxRegistrars::get())] // R fn set_account_id(origin, #[compact] index: RegistrarIndex, new: T::AccountId, @@ -1075,9 +908,7 @@ decl_module! { .ok_or_else(|| DispatchError::from(Error::::InvalidIndex))?; Ok(rs.len()) })?; - Ok(Some(T::DbWeight::get().reads_writes(1, 1) - + 8_900_000 + 320_000 * registrars as Weight // R - ).into()) + Ok(Some(T::WeightInfo::set_account_id(registrars as u32)).into()) // R } /// Set the field information for a registrar. @@ -1093,10 +924,7 @@ decl_module! { /// - One storage mutation `O(R)`. /// - Benchmark: 7.464 + R * 0.325 µs (min squares analysis) /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) - + 7_500_000 // constant - + 330_000 * T::MaxRegistrars::get() as Weight // R - ] + #[weight = T::WeightInfo::set_fields(T::MaxRegistrars::get())] // R fn set_fields(origin, #[compact] index: RegistrarIndex, fields: IdentityFields, @@ -1110,9 +938,9 @@ decl_module! { .ok_or_else(|| DispatchError::from(Error::::InvalidIndex))?; Ok(rs.len()) })?; - Ok(Some(T::DbWeight::get().reads_writes(1, 1) - + 7_500_000 + 330_000 * registrars as Weight // R - ).into()) + Ok(Some(T::WeightInfo::set_fields( + registrars as u32 // R + )).into()) } /// Provide a judgement for an account's identity. @@ -1134,7 +962,7 @@ decl_module! { /// - Storage: 1 read `O(R)`, 1 mutate `O(R + X)`. /// - One event. /// # - #[weight = weight_for::provide_judgement::( + #[weight = T::WeightInfo::provide_judgement( T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X )] @@ -1164,12 +992,15 @@ decl_module! { Err(position) => id.judgements.insert(position, item), } - let judgements = id.judgements.len() as Weight; - let extra_fields = id.info.additional.len() as Weight; + let judgements = id.judgements.len(); + let extra_fields = id.info.additional.len(); >::insert(&target, id); Self::deposit_event(RawEvent::JudgementGiven(target, reg_index)); - Ok(Some(weight_for::provide_judgement::(judgements, extra_fields)).into()) + Ok(Some(T::WeightInfo::provide_judgement( + judgements as u32, + extra_fields as u32, + )).into()) } /// Remove an account's identity and sub-account information and slash the deposits. @@ -1191,7 +1022,7 @@ decl_module! { /// - `S + 2` storage mutations. /// - One event. /// # - #[weight = weight_for::kill_identity::( + #[weight = T::WeightInfo::kill_identity( T::MaxRegistrars::get().into(), // R T::MaxSubAccounts::get().into(), // S T::MaxAdditionalFields::get().into(), // X @@ -1213,10 +1044,10 @@ decl_module! { Self::deposit_event(RawEvent::IdentityKilled(target, deposit)); - Ok(Some(weight_for::kill_identity::( - id.judgements.len() as Weight, // R - sub_ids.len() as Weight, // S - id.info.additional.len() as Weight // X + Ok(Some(T::WeightInfo::kill_identity( + id.judgements.len() as u32, // R + sub_ids.len() as u32, // S + id.info.additional.len() as u32 // X )).into()) } @@ -1227,9 +1058,7 @@ decl_module! { /// /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. 
- #[weight = weight_for::add_sub::( - T::MaxSubAccounts::get().into(), // S - )] + #[weight = T::WeightInfo::add_sub(T::MaxSubAccounts::get())] fn add_sub(origin, sub: ::Source, data: Data) -> DispatchResult { let sender = ensure_signed(origin)?; let sub = T::Lookup::lookup(sub)?; @@ -1257,7 +1086,7 @@ decl_module! { /// /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. - #[weight = weight_for::rename_sub::()] + #[weight = T::WeightInfo::rename_sub(T::MaxSubAccounts::get())] fn rename_sub(origin, sub: ::Source, data: Data) { let sender = ensure_signed(origin)?; let sub = T::Lookup::lookup(sub)?; @@ -1273,9 +1102,7 @@ decl_module! { /// /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. - #[weight = weight_for::remove_sub::( - T::MaxSubAccounts::get().into(), // S - )] + #[weight = T::WeightInfo::remove_sub(T::MaxSubAccounts::get())] fn remove_sub(origin, sub: ::Source) { let sender = ensure_signed(origin)?; ensure!(IdentityOf::::contains_key(&sender), Error::::NoIdentity); @@ -1302,9 +1129,7 @@ decl_module! { /// /// NOTE: This should not normally be used, but is provided in the case that the non- /// controller of an account is maliciously registered as a sub-account. - #[weight = weight_for::quit_sub::( - T::MaxSubAccounts::get().into(), // S - )] + #[weight = T::WeightInfo::quit_sub(T::MaxSubAccounts::get())] fn quit_sub(origin) { let sender = ensure_signed(origin)?; let (sup, _) = SuperOf::::take(&sender).ok_or(Error::::NotSub)?; @@ -1328,459 +1153,3 @@ impl Module { .collect() } } - -#[cfg(test)] -mod tests { - use super::*; - - use sp_runtime::traits::BadOrigin; - use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, - ord_parameter_types, - }; - use sp_core::H256; - use frame_system::{EnsureSignedBy, EnsureOneOf, EnsureRoot}; - use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, - }; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Test { - type BaseCallFilter = (); - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = (); - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - impl pallet_balances::Trait for Test { - type Balance = u64; - type Event = (); - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); - } - parameter_types! 
{ - pub const BasicDeposit: u64 = 10; - pub const FieldDeposit: u64 = 10; - pub const SubAccountDeposit: u64 = 10; - pub const MaxSubAccounts: u32 = 2; - pub const MaxAdditionalFields: u32 = 2; - pub const MaxRegistrars: u32 = 20; - } - ord_parameter_types! { - pub const One: u64 = 1; - pub const Two: u64 = 2; - } - type EnsureOneOrRoot = EnsureOneOf< - u64, - EnsureRoot, - EnsureSignedBy - >; - type EnsureTwoOrRoot = EnsureOneOf< - u64, - EnsureRoot, - EnsureSignedBy - >; - impl Trait for Test { - type Event = (); - type Currency = Balances; - type Slashed = (); - type BasicDeposit = BasicDeposit; - type FieldDeposit = FieldDeposit; - type SubAccountDeposit = SubAccountDeposit; - type MaxSubAccounts = MaxSubAccounts; - type MaxAdditionalFields = MaxAdditionalFields; - type MaxRegistrars = MaxRegistrars; - type RegistrarOrigin = EnsureOneOrRoot; - type ForceOrigin = EnsureTwoOrRoot; - type WeightInfo = (); - } - type System = frame_system::Module; - type Balances = pallet_balances::Module; - type Identity = Module; - - pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![ - (1, 10), - (2, 10), - (3, 10), - (10, 100), - (20, 100), - (30, 100), - ], - }.assimilate_storage(&mut t).unwrap(); - t.into() - } - - fn ten() -> IdentityInfo { - IdentityInfo { - display: Data::Raw(b"ten".to_vec()), - legal: Data::Raw(b"The Right Ordinal Ten, Esq.".to_vec()), - .. Default::default() - } - } - - fn twenty() -> IdentityInfo { - IdentityInfo { - display: Data::Raw(b"twenty".to_vec()), - legal: Data::Raw(b"The Right Ordinal Twenty, Esq.".to_vec()), - .. Default::default() - } - } - - #[test] - fn editing_subaccounts_should_work() { - new_test_ext().execute_with(|| { - let data = |x| Data::Raw(vec![x; 1]); - - assert_noop!(Identity::add_sub(Origin::signed(10), 20, data(1)), Error::::NoIdentity); - - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - - // first sub account - assert_ok!(Identity::add_sub(Origin::signed(10), 1, data(1))); - assert_eq!(SuperOf::::get(1), Some((10, data(1)))); - assert_eq!(Balances::free_balance(10), 80); - - // second sub account - assert_ok!(Identity::add_sub(Origin::signed(10), 2, data(2))); - assert_eq!(SuperOf::::get(1), Some((10, data(1)))); - assert_eq!(SuperOf::::get(2), Some((10, data(2)))); - assert_eq!(Balances::free_balance(10), 70); - - // third sub account is too many - assert_noop!(Identity::add_sub(Origin::signed(10), 3, data(3)), Error::::TooManySubAccounts); - - // rename first sub account - assert_ok!(Identity::rename_sub(Origin::signed(10), 1, data(11))); - assert_eq!(SuperOf::::get(1), Some((10, data(11)))); - assert_eq!(SuperOf::::get(2), Some((10, data(2)))); - assert_eq!(Balances::free_balance(10), 70); - - // remove first sub account - assert_ok!(Identity::remove_sub(Origin::signed(10), 1)); - assert_eq!(SuperOf::::get(1), None); - assert_eq!(SuperOf::::get(2), Some((10, data(2)))); - assert_eq!(Balances::free_balance(10), 80); - - // add third sub account - assert_ok!(Identity::add_sub(Origin::signed(10), 3, data(3))); - assert_eq!(SuperOf::::get(1), None); - assert_eq!(SuperOf::::get(2), Some((10, data(2)))); - assert_eq!(SuperOf::::get(3), Some((10, data(3)))); - assert_eq!(Balances::free_balance(10), 70); - }); - } - - #[test] - fn resolving_subaccount_ownership_works() { - new_test_ext().execute_with(|| { - let data = |x| Data::Raw(vec![x; 1]); - - assert_ok!(Identity::set_identity(Origin::signed(10), 
ten())); - assert_ok!(Identity::set_identity(Origin::signed(20), twenty())); - - // 10 claims 1 as a subaccount - assert_ok!(Identity::add_sub(Origin::signed(10), 1, data(1))); - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::free_balance(10), 80); - assert_eq!(Balances::reserved_balance(10), 20); - // 20 cannot claim 1 now - assert_noop!(Identity::add_sub(Origin::signed(20), 1, data(1)), Error::::AlreadyClaimed); - // 1 wants to be with 20 so it quits from 10 - assert_ok!(Identity::quit_sub(Origin::signed(1))); - // 1 gets the 10 that 10 paid. - assert_eq!(Balances::free_balance(1), 20); - assert_eq!(Balances::free_balance(10), 80); - assert_eq!(Balances::reserved_balance(10), 10); - // 20 can claim 1 now - assert_ok!(Identity::add_sub(Origin::signed(20), 1, data(1))); - }); - } - - #[test] - fn trailing_zeros_decodes_into_default_data() { - let encoded = Data::Raw(b"Hello".to_vec()).encode(); - assert!(<(Data, Data)>::decode(&mut &encoded[..]).is_err()); - let input = &mut &encoded[..]; - let (a, b) = <(Data, Data)>::decode(&mut AppendZerosInput::new(input)).unwrap(); - assert_eq!(a, Data::Raw(b"Hello".to_vec())); - assert_eq!(b, Data::None); - } - - #[test] - fn adding_registrar_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); - let fields = IdentityFields(IdentityField::Display | IdentityField::Legal); - assert_ok!(Identity::set_fields(Origin::signed(3), 0, fields)); - assert_eq!(Identity::registrars(), vec![ - Some(RegistrarInfo { account: 3, fee: 10, fields }) - ]); - }); - } - - #[test] - fn amount_of_registrars_is_limited() { - new_test_ext().execute_with(|| { - for i in 1..MaxRegistrars::get() + 1 { - assert_ok!(Identity::add_registrar(Origin::signed(1), i as u64)); - } - let last_registrar = MaxRegistrars::get() as u64 + 1; - assert_noop!( - Identity::add_registrar(Origin::signed(1), last_registrar), - Error::::TooManyRegistrars - ); - }); - } - - #[test] - fn registration_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); - let mut three_fields = ten(); - three_fields.additional.push(Default::default()); - three_fields.additional.push(Default::default()); - three_fields.additional.push(Default::default()); - assert_noop!( - Identity::set_identity(Origin::signed(10), three_fields), - Error::::TooManyFields - ); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_eq!(Identity::identity(10).unwrap().info, ten()); - assert_eq!(Balances::free_balance(10), 90); - assert_ok!(Identity::clear_identity(Origin::signed(10))); - assert_eq!(Balances::free_balance(10), 100); - assert_noop!(Identity::clear_identity(Origin::signed(10)), Error::::NotNamed); - }); - } - - #[test] - fn uninvited_judgement_should_work() { - new_test_ext().execute_with(|| { - assert_noop!( - Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable), - Error::::InvalidIndex - ); - - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_noop!( - Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable), - Error::::InvalidTarget - ); - - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_noop!( - Identity::provide_judgement(Origin::signed(10), 0, 10, Judgement::Reasonable), - Error::::InvalidIndex - ); - assert_noop!( - Identity::provide_judgement(Origin::signed(3), 0, 
10, Judgement::FeePaid(1)), - Error::::InvalidJudgement - ); - - assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); - assert_eq!(Identity::identity(10).unwrap().judgements, vec![(0, Judgement::Reasonable)]); - }); - } - - #[test] - fn clearing_judgement_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); - assert_ok!(Identity::clear_identity(Origin::signed(10))); - assert_eq!(Identity::identity(10), None); - }); - } - - #[test] - fn killing_slashing_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_noop!(Identity::kill_identity(Origin::signed(1), 10), BadOrigin); - assert_ok!(Identity::kill_identity(Origin::signed(2), 10)); - assert_eq!(Identity::identity(10), None); - assert_eq!(Balances::free_balance(10), 90); - assert_noop!(Identity::kill_identity(Origin::signed(2), 10), Error::::NotNamed); - }); - } - - #[test] - fn setting_subaccounts_should_work() { - new_test_ext().execute_with(|| { - let mut subs = vec![(20, Data::Raw(vec![40; 1]))]; - assert_noop!(Identity::set_subs(Origin::signed(10), subs.clone()), Error::::NotFound); - - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); - assert_eq!(Balances::free_balance(10), 80); - assert_eq!(Identity::subs_of(10), (10, vec![20])); - assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1])))); - - // push another item and re-set it. - subs.push((30, Data::Raw(vec![50; 1]))); - assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); - assert_eq!(Balances::free_balance(10), 70); - assert_eq!(Identity::subs_of(10), (20, vec![20, 30])); - assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1])))); - assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1])))); - - // switch out one of the items and re-set. 
- subs[0] = (40, Data::Raw(vec![60; 1])); - assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); - assert_eq!(Balances::free_balance(10), 70); // no change in the balance - assert_eq!(Identity::subs_of(10), (20, vec![40, 30])); - assert_eq!(Identity::super_of(20), None); - assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1])))); - assert_eq!(Identity::super_of(40), Some((10, Data::Raw(vec![60; 1])))); - - // clear - assert_ok!(Identity::set_subs(Origin::signed(10), vec![])); - assert_eq!(Balances::free_balance(10), 90); - assert_eq!(Identity::subs_of(10), (0, vec![])); - assert_eq!(Identity::super_of(30), None); - assert_eq!(Identity::super_of(40), None); - - subs.push((20, Data::Raw(vec![40; 1]))); - assert_noop!(Identity::set_subs(Origin::signed(10), subs.clone()), Error::::TooManySubAccounts); - }); - } - - #[test] - fn clearing_account_should_remove_subaccounts_and_refund() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_ok!(Identity::set_subs(Origin::signed(10), vec![(20, Data::Raw(vec![40; 1]))])); - assert_ok!(Identity::clear_identity(Origin::signed(10))); - assert_eq!(Balances::free_balance(10), 100); - assert!(Identity::super_of(20).is_none()); - }); - } - - #[test] - fn killing_account_should_remove_subaccounts_and_not_refund() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_ok!(Identity::set_subs(Origin::signed(10), vec![(20, Data::Raw(vec![40; 1]))])); - assert_ok!(Identity::kill_identity(Origin::signed(2), 10)); - assert_eq!(Balances::free_balance(10), 80); - assert!(Identity::super_of(20).is_none()); - }); - } - - #[test] - fn cancelling_requested_judgement_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); - assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::NoIdentity); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); - assert_ok!(Identity::cancel_request(Origin::signed(10), 0)); - assert_eq!(Balances::free_balance(10), 90); - assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::NotFound); - - assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); - assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::JudgementGiven); - }); - } - - #[test] - fn requesting_judgement_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_noop!(Identity::request_judgement(Origin::signed(10), 0, 9), Error::::FeeChanged); - assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); - // 10 for the judgement request, 10 for the identity. - assert_eq!(Balances::free_balance(10), 80); - - // Re-requesting won't work as we already paid. - assert_noop!(Identity::request_judgement(Origin::signed(10), 0, 10), Error::::StickyJudgement); - assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Erroneous)); - // Registrar got their payment now. - assert_eq!(Balances::free_balance(3), 20); - - // Re-requesting still won't work as it's erroneous. 
- assert_noop!(Identity::request_judgement(Origin::signed(10), 0, 10), Error::::StickyJudgement); - - // Requesting from a second registrar still works. - assert_ok!(Identity::add_registrar(Origin::signed(1), 4)); - assert_ok!(Identity::request_judgement(Origin::signed(10), 1, 10)); - - // Re-requesting after the judgement has been reduced works. - assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::OutOfDate)); - assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); - }); - } - - #[test] - fn field_deposit_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); - assert_ok!(Identity::set_identity(Origin::signed(10), IdentityInfo { - additional: vec![ - (Data::Raw(b"number".to_vec()), Data::Raw(10u32.encode())), - (Data::Raw(b"text".to_vec()), Data::Raw(b"10".to_vec())), - ], .. Default::default() - })); - assert_eq!(Balances::free_balance(10), 70); - }); - } - - #[test] - fn setting_account_id_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - // account 4 cannot change the first registrar's identity since it's owned by 3. - assert_noop!(Identity::set_account_id(Origin::signed(4), 0, 3), Error::::InvalidIndex); - // account 3 can, because that's the registrar's current account. - assert_ok!(Identity::set_account_id(Origin::signed(3), 0, 4)); - // account 4 can now, because that's their new ID. - assert_ok!(Identity::set_account_id(Origin::signed(4), 0, 3)); - }); - } -} diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..0637ac6aafc5f6cb9bbd6427339d46285c041891 --- /dev/null +++ b/frame/identity/src/tests.rs @@ -0,0 +1,472 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Tests for Identity Pallet + +use super::*; + +use sp_runtime::traits::BadOrigin; +use frame_support::{ + assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, + ord_parameter_types, +}; +use sp_core::H256; +use frame_system::{EnsureSignedBy, EnsureOneOf, EnsureRoot}; +use sp_runtime::{ + Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, +}; + +impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} +} + +#[derive(Clone, Eq, PartialEq)] +pub struct Test; +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} +impl frame_system::Trait for Test { + type BaseCallFilter = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = (); + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type BlockExecutionWeight = (); + type ExtrinsicBaseWeight = (); + type MaximumExtrinsicWeight = MaximumBlockWeight; + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type PalletInfo = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); +} +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} +impl pallet_balances::Trait for Test { + type Balance = u64; + type Event = (); + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type MaxLocks = (); + type WeightInfo = (); +} +parameter_types! { + pub const BasicDeposit: u64 = 10; + pub const FieldDeposit: u64 = 10; + pub const SubAccountDeposit: u64 = 10; + pub const MaxSubAccounts: u32 = 2; + pub const MaxAdditionalFields: u32 = 2; + pub const MaxRegistrars: u32 = 20; +} +ord_parameter_types! { + pub const One: u64 = 1; + pub const Two: u64 = 2; +} +type EnsureOneOrRoot = EnsureOneOf< + u64, + EnsureRoot, + EnsureSignedBy +>; +type EnsureTwoOrRoot = EnsureOneOf< + u64, + EnsureRoot, + EnsureSignedBy +>; +impl Trait for Test { + type Event = (); + type Currency = Balances; + type Slashed = (); + type BasicDeposit = BasicDeposit; + type FieldDeposit = FieldDeposit; + type SubAccountDeposit = SubAccountDeposit; + type MaxSubAccounts = MaxSubAccounts; + type MaxAdditionalFields = MaxAdditionalFields; + type MaxRegistrars = MaxRegistrars; + type RegistrarOrigin = EnsureOneOrRoot; + type ForceOrigin = EnsureTwoOrRoot; + type WeightInfo = (); +} +type System = frame_system::Module; +type Balances = pallet_balances::Module; +type Identity = Module; + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![ + (1, 10), + (2, 10), + (3, 10), + (10, 100), + (20, 100), + (30, 100), + ], + }.assimilate_storage(&mut t).unwrap(); + t.into() +} + +fn ten() -> IdentityInfo { + IdentityInfo { + display: Data::Raw(b"ten".to_vec()), + legal: Data::Raw(b"The Right Ordinal Ten, Esq.".to_vec()), + .. Default::default() + } +} + +fn twenty() -> IdentityInfo { + IdentityInfo { + display: Data::Raw(b"twenty".to_vec()), + legal: Data::Raw(b"The Right Ordinal Twenty, Esq.".to_vec()), + .. 
Default::default() + } +} + +#[test] +fn editing_subaccounts_should_work() { + new_test_ext().execute_with(|| { + let data = |x| Data::Raw(vec![x; 1]); + + assert_noop!(Identity::add_sub(Origin::signed(10), 20, data(1)), Error::::NoIdentity); + + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + + // first sub account + assert_ok!(Identity::add_sub(Origin::signed(10), 1, data(1))); + assert_eq!(SuperOf::::get(1), Some((10, data(1)))); + assert_eq!(Balances::free_balance(10), 80); + + // second sub account + assert_ok!(Identity::add_sub(Origin::signed(10), 2, data(2))); + assert_eq!(SuperOf::::get(1), Some((10, data(1)))); + assert_eq!(SuperOf::::get(2), Some((10, data(2)))); + assert_eq!(Balances::free_balance(10), 70); + + // third sub account is too many + assert_noop!(Identity::add_sub(Origin::signed(10), 3, data(3)), Error::::TooManySubAccounts); + + // rename first sub account + assert_ok!(Identity::rename_sub(Origin::signed(10), 1, data(11))); + assert_eq!(SuperOf::::get(1), Some((10, data(11)))); + assert_eq!(SuperOf::::get(2), Some((10, data(2)))); + assert_eq!(Balances::free_balance(10), 70); + + // remove first sub account + assert_ok!(Identity::remove_sub(Origin::signed(10), 1)); + assert_eq!(SuperOf::::get(1), None); + assert_eq!(SuperOf::::get(2), Some((10, data(2)))); + assert_eq!(Balances::free_balance(10), 80); + + // add third sub account + assert_ok!(Identity::add_sub(Origin::signed(10), 3, data(3))); + assert_eq!(SuperOf::::get(1), None); + assert_eq!(SuperOf::::get(2), Some((10, data(2)))); + assert_eq!(SuperOf::::get(3), Some((10, data(3)))); + assert_eq!(Balances::free_balance(10), 70); + }); +} + +#[test] +fn resolving_subaccount_ownership_works() { + new_test_ext().execute_with(|| { + let data = |x| Data::Raw(vec![x; 1]); + + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::set_identity(Origin::signed(20), twenty())); + + // 10 claims 1 as a subaccount + assert_ok!(Identity::add_sub(Origin::signed(10), 1, data(1))); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(10), 80); + assert_eq!(Balances::reserved_balance(10), 20); + // 20 cannot claim 1 now + assert_noop!(Identity::add_sub(Origin::signed(20), 1, data(1)), Error::::AlreadyClaimed); + // 1 wants to be with 20 so it quits from 10 + assert_ok!(Identity::quit_sub(Origin::signed(1))); + // 1 gets the 10 that 10 paid. 
+ assert_eq!(Balances::free_balance(1), 20); + assert_eq!(Balances::free_balance(10), 80); + assert_eq!(Balances::reserved_balance(10), 10); + // 20 can claim 1 now + assert_ok!(Identity::add_sub(Origin::signed(20), 1, data(1))); + }); +} + +#[test] +fn trailing_zeros_decodes_into_default_data() { + let encoded = Data::Raw(b"Hello".to_vec()).encode(); + assert!(<(Data, Data)>::decode(&mut &encoded[..]).is_err()); + let input = &mut &encoded[..]; + let (a, b) = <(Data, Data)>::decode(&mut AppendZerosInput::new(input)).unwrap(); + assert_eq!(a, Data::Raw(b"Hello".to_vec())); + assert_eq!(b, Data::None); +} + +#[test] +fn adding_registrar_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); + let fields = IdentityFields(IdentityField::Display | IdentityField::Legal); + assert_ok!(Identity::set_fields(Origin::signed(3), 0, fields)); + assert_eq!(Identity::registrars(), vec![ + Some(RegistrarInfo { account: 3, fee: 10, fields }) + ]); + }); +} + +#[test] +fn amount_of_registrars_is_limited() { + new_test_ext().execute_with(|| { + for i in 1..MaxRegistrars::get() + 1 { + assert_ok!(Identity::add_registrar(Origin::signed(1), i as u64)); + } + let last_registrar = MaxRegistrars::get() as u64 + 1; + assert_noop!( + Identity::add_registrar(Origin::signed(1), last_registrar), + Error::::TooManyRegistrars + ); + }); +} + +#[test] +fn registration_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); + let mut three_fields = ten(); + three_fields.additional.push(Default::default()); + three_fields.additional.push(Default::default()); + three_fields.additional.push(Default::default()); + assert_noop!( + Identity::set_identity(Origin::signed(10), three_fields), + Error::::TooManyFields + ); + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_eq!(Identity::identity(10).unwrap().info, ten()); + assert_eq!(Balances::free_balance(10), 90); + assert_ok!(Identity::clear_identity(Origin::signed(10))); + assert_eq!(Balances::free_balance(10), 100); + assert_noop!(Identity::clear_identity(Origin::signed(10)), Error::::NotNamed); + }); +} + +#[test] +fn uninvited_judgement_should_work() { + new_test_ext().execute_with(|| { + assert_noop!( + Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable), + Error::::InvalidIndex + ); + + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_noop!( + Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable), + Error::::InvalidTarget + ); + + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_noop!( + Identity::provide_judgement(Origin::signed(10), 0, 10, Judgement::Reasonable), + Error::::InvalidIndex + ); + assert_noop!( + Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::FeePaid(1)), + Error::::InvalidJudgement + ); + + assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); + assert_eq!(Identity::identity(10).unwrap().judgements, vec![(0, Judgement::Reasonable)]); + }); +} + +#[test] +fn clearing_judgement_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); + 
assert_ok!(Identity::clear_identity(Origin::signed(10))); + assert_eq!(Identity::identity(10), None); + }); +} + +#[test] +fn killing_slashing_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_noop!(Identity::kill_identity(Origin::signed(1), 10), BadOrigin); + assert_ok!(Identity::kill_identity(Origin::signed(2), 10)); + assert_eq!(Identity::identity(10), None); + assert_eq!(Balances::free_balance(10), 90); + assert_noop!(Identity::kill_identity(Origin::signed(2), 10), Error::::NotNamed); + }); +} + +#[test] +fn setting_subaccounts_should_work() { + new_test_ext().execute_with(|| { + let mut subs = vec![(20, Data::Raw(vec![40; 1]))]; + assert_noop!(Identity::set_subs(Origin::signed(10), subs.clone()), Error::::NotFound); + + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); + assert_eq!(Balances::free_balance(10), 80); + assert_eq!(Identity::subs_of(10), (10, vec![20])); + assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1])))); + + // push another item and re-set it. + subs.push((30, Data::Raw(vec![50; 1]))); + assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); + assert_eq!(Balances::free_balance(10), 70); + assert_eq!(Identity::subs_of(10), (20, vec![20, 30])); + assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1])))); + assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1])))); + + // switch out one of the items and re-set. + subs[0] = (40, Data::Raw(vec![60; 1])); + assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); + assert_eq!(Balances::free_balance(10), 70); // no change in the balance + assert_eq!(Identity::subs_of(10), (20, vec![40, 30])); + assert_eq!(Identity::super_of(20), None); + assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1])))); + assert_eq!(Identity::super_of(40), Some((10, Data::Raw(vec![60; 1])))); + + // clear + assert_ok!(Identity::set_subs(Origin::signed(10), vec![])); + assert_eq!(Balances::free_balance(10), 90); + assert_eq!(Identity::subs_of(10), (0, vec![])); + assert_eq!(Identity::super_of(30), None); + assert_eq!(Identity::super_of(40), None); + + subs.push((20, Data::Raw(vec![40; 1]))); + assert_noop!(Identity::set_subs(Origin::signed(10), subs.clone()), Error::::TooManySubAccounts); + }); +} + +#[test] +fn clearing_account_should_remove_subaccounts_and_refund() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::set_subs(Origin::signed(10), vec![(20, Data::Raw(vec![40; 1]))])); + assert_ok!(Identity::clear_identity(Origin::signed(10))); + assert_eq!(Balances::free_balance(10), 100); + assert!(Identity::super_of(20).is_none()); + }); +} + +#[test] +fn killing_account_should_remove_subaccounts_and_not_refund() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::set_subs(Origin::signed(10), vec![(20, Data::Raw(vec![40; 1]))])); + assert_ok!(Identity::kill_identity(Origin::signed(2), 10)); + assert_eq!(Balances::free_balance(10), 80); + assert!(Identity::super_of(20).is_none()); + }); +} + +#[test] +fn cancelling_requested_judgement_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); + assert_noop!(Identity::cancel_request(Origin::signed(10), 0), 
Error::::NoIdentity); + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); + assert_ok!(Identity::cancel_request(Origin::signed(10), 0)); + assert_eq!(Balances::free_balance(10), 90); + assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::NotFound); + + assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); + assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::JudgementGiven); + }); +} + +#[test] +fn requesting_judgement_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_noop!(Identity::request_judgement(Origin::signed(10), 0, 9), Error::::FeeChanged); + assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); + // 10 for the judgement request, 10 for the identity. + assert_eq!(Balances::free_balance(10), 80); + + // Re-requesting won't work as we already paid. + assert_noop!(Identity::request_judgement(Origin::signed(10), 0, 10), Error::::StickyJudgement); + assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Erroneous)); + // Registrar got their payment now. + assert_eq!(Balances::free_balance(3), 20); + + // Re-requesting still won't work as it's erroneous. + assert_noop!(Identity::request_judgement(Origin::signed(10), 0, 10), Error::::StickyJudgement); + + // Requesting from a second registrar still works. + assert_ok!(Identity::add_registrar(Origin::signed(1), 4)); + assert_ok!(Identity::request_judgement(Origin::signed(10), 1, 10)); + + // Re-requesting after the judgement has been reduced works. + assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::OutOfDate)); + assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); + }); +} + +#[test] +fn field_deposit_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); + assert_ok!(Identity::set_identity(Origin::signed(10), IdentityInfo { + additional: vec![ + (Data::Raw(b"number".to_vec()), Data::Raw(10u32.encode())), + (Data::Raw(b"text".to_vec()), Data::Raw(b"10".to_vec())), + ], .. Default::default() + })); + assert_eq!(Balances::free_balance(10), 70); + }); +} + +#[test] +fn setting_account_id_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + // account 4 cannot change the first registrar's identity since it's owned by 3. + assert_noop!(Identity::set_account_id(Origin::signed(4), 0, 3), Error::::InvalidIndex); + // account 3 can, because that's the registrar's current account. + assert_ok!(Identity::set_account_id(Origin::signed(3), 0, 4)); + // account 4 can now, because that's their new ID. 
+ assert_ok!(Identity::set_account_id(Origin::signed(4), 0, 3)); + }); +} diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index 75fc4e2454ccae7bac567376e38d350f661edb2e..ef22d676887328b85e4c581b60cbad5944787c72 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -1,31 +1,32 @@ [package] name = "pallet-im-online" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME's I'm online pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } -pallet-authorship = { version = "2.0.0-rc5", default-features = false, path = "../authorship" } +sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } +pallet-authorship = { version = "2.0.0", default-features = false, path = "../authorship" } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0-rc5", default-features = false, path = "../session" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +pallet-session = { version = "2.0.0", default-features = false, path = "../session" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } [features] default = ["std", "pallet-session/historical"] diff --git a/frame/im-online/README.md b/frame/im-online/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9a65bb6a980861830b28987134528002f14eb202 --- /dev/null +++ b/frame/im-online/README.md @@ -0,0 +1,51 @@ +# I'm online Module + +If the local node is a validator (i.e. contains an authority key), this module +gossips a heartbeat transaction with each new session. 
The heartbeat functions +as a simple mechanism to signal that the node is online in the current era. + +Received heartbeats are tracked for one era and reset with each new era. The +module exposes two public functions to query if a heartbeat has been received +in the current era or session. + +The heartbeat is a signed transaction, which is signed using the session key +and includes the recent best block number of the local validator's chain as well +as the `NetworkState`. +It is submitted as an Unsigned Transaction via off-chain workers. + +- [`im_online::Trait`](https://docs.rs/pallet-im-online/latest/pallet_im_online/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-im-online/latest/pallet_im_online/enum.Call.html) +- [`Module`](https://docs.rs/pallet-im-online/latest/pallet_im_online/struct.Module.html) + +## Interface + +### Public Functions + +- `is_online` - True if the validator sent a heartbeat in the current session. + +## Usage + +```rust +use frame_support::{decl_module, dispatch}; +use frame_system::ensure_signed; +use pallet_im_online::{self as im_online}; + +pub trait Trait: im_online::Trait {} + +decl_module! { + pub struct Module<T: Trait> for enum Call where origin: T::Origin { + #[weight = 0] + pub fn is_online(origin, authority_index: u32) -> dispatch::DispatchResult { + let _sender = ensure_signed(origin)?; + let _is_online = <im_online::Module<T>>::is_online(authority_index); + Ok(()) + } + } +} +``` + +## Dependencies + +This module depends on the [Session module](https://docs.rs/pallet-session/latest/pallet_session/). + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/im-online/src/benchmarking.rs b/frame/im-online/src/benchmarking.rs index 92d9b9d5a5364524065b263af8e422628b847420..b92be023ce480013cdfa2345027a6b48cdd4cc46 100644 --- a/frame/im-online/src/benchmarking.rs +++ b/frame/im-online/src/benchmarking.rs @@ -23,7 +23,8 @@ use super::*; use frame_system::RawOrigin; use frame_benchmarking::benchmarks; -use sp_core::offchain::{OpaquePeerId, OpaqueMultiaddr}; +use sp_core::OpaquePeerId; +use sp_core::offchain::OpaqueMultiaddr; use sp_runtime::traits::{ValidateUnsigned, Zero}; use sp_runtime::transaction_validity::TransactionSource; use frame_support::traits::UnfilteredDispatchable; @@ -64,12 +65,14 @@ pub fn create_heartbeat(k: u32, e: u32) -> benchmarks! { _{ } + #[extra] heartbeat { let k in 1 .. MAX_KEYS; let e in 1 .. MAX_EXTERNAL_ADDRESSES; let (input_heartbeat, signature) = create_heartbeat::(k, e)?; }: _(RawOrigin::None, input_heartbeat, signature) + #[extra] validate_unsigned { let k in 1 .. MAX_KEYS; let e in 1 .. MAX_EXTERNAL_ADDRESSES; diff --git a/frame/im-online/src/default_weight.rs b/frame/im-online/src/default_weight.rs new file mode 100644 index 0000000000000000000000000000000000000000..e6efb42f2e3d8684142262620218b4f9d23ea0e7 --- /dev/null +++ b/frame/im-online/src/default_weight.rs @@ -0,0 +1,33 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { + (139830000 as Weight) + .saturating_add((211000 as Weight).saturating_mul(k as Weight)) + .saturating_add((654000 as Weight).saturating_mul(e as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } +} diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index 01b7b999dd004749e6bc25e579486db1b5773205..ef9c6b9182af62f82511f8c242a1ec06c6deb284 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -72,6 +72,7 @@ mod mock; mod tests; mod benchmarking; +mod default_weight; use sp_application_crypto::RuntimeAppPublic; use codec::{Encode, Decode}; @@ -227,17 +228,9 @@ pub struct Heartbeat } pub trait WeightInfo { - fn heartbeat(k: u32, e: u32, ) -> Weight; - fn validate_unsigned(k: u32, e: u32, ) -> Weight; fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight; } -impl WeightInfo for () { - fn heartbeat(_k: u32, _e: u32, ) -> Weight { 1_000_000_000 } - fn validate_unsigned(_k: u32, _e: u32, ) -> Weight { 1_000_000_000 } - fn validate_unsigned_and_then_heartbeat(_k: u32, _e: u32, ) -> Weight { 1_000_000_000 } -} - pub trait Trait: SendTransactionTypes> + pallet_session::historical::Trait { /// The identifier type for an authority. type AuthorityId: Member + Parameter + RuntimeAppPublic + Default + Ord; @@ -276,11 +269,11 @@ decl_event!( ::AuthorityId, IdentificationTuple = IdentificationTuple, { - /// A new heartbeat was received from `AuthorityId` [authority_id] + /// A new heartbeat was received from `AuthorityId` \[authority_id\] HeartbeatReceived(AuthorityId), /// At the end of the session, no offence was committed. AllGood, - /// At the end of the session, at least one validator was found to be [offline]. + /// At the end of the session, at least one validator was found to be \[offline\]. SomeOffline(Vec), } ); @@ -333,23 +326,20 @@ decl_module! { fn deposit_event() = default; /// # - /// - Complexity: `O(K + E)` where K is length of `Keys` and E is length of - /// `Heartbeat.network_state.external_address` - /// + /// - Complexity: `O(K + E)` where K is length of `Keys` (heartbeat.validators_len) + /// and E is length of `heartbeat.network_state.external_address` /// - `O(K)`: decoding of length `K` /// - `O(E)`: decoding/encoding of length `E` /// - DbReads: pallet_session `Validators`, pallet_session `CurrentIndex`, `Keys`, /// `ReceivedHeartbeats` /// - DbWrites: `ReceivedHeartbeats` /// # - // NOTE: the weight include cost of validate_unsigned as it is part of the cost to import - // block with such an extrinsic. - #[weight = (310_000_000 + T::DbWeight::get().reads_writes(4, 1)) - .saturating_add(750_000.saturating_mul(heartbeat.validators_len as Weight)) - .saturating_add( - 1_200_000.saturating_mul(heartbeat.network_state.external_addresses.len() as Weight) - ) - ] + // NOTE: the weight includes the cost of validate_unsigned as it is part of the cost to + // import block with such an extrinsic. 
+ #[weight = ::WeightInfo::validate_unsigned_and_then_heartbeat( + heartbeat.validators_len as u32, + heartbeat.network_state.external_addresses.len() as u32, + )] fn heartbeat( origin, heartbeat: Heartbeat, diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index 29fe6acb3337cd3aeecc503b1da2b0cfdd984e05..dae4bb3447e56423ac4a3f4c479f09fdbeab8e03 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -130,7 +130,7 @@ impl frame_system::Trait for Runtime { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index 835d8440e6d5b0e0f16caf750d841361a3012edc..22c6b4464c37073077f02bb9eca7d9032d9042c8 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -21,8 +21,8 @@ use super::*; use crate::mock::*; +use sp_core::OpaquePeerId; use sp_core::offchain::{ - OpaquePeerId, OffchainExt, TransactionPoolExt, testing::{TestOffchainExt, TestTransactionPoolExt}, diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index 85ab36bc039053802b4e1855e0b2944353cc2e48..aea8dbf1a866e1439e54f163967385016f28c3a3 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-indices" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME indices management pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,18 +15,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-keyring = { version = "2.0.0-rc5", optional = true, path = "../../primitives/keyring" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-keyring = { version = "2.0.0", optional = true, path = "../../primitives/keyring" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } 
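Aside: the `validate_unsigned_and_then_heartbeat` weight hooked into the `#[weight]` attribute above is a simple polynomial in the number of session keys `k` and reported external addresses `e`, plus fixed DB costs. A minimal sketch of how that formula evaluates, using the constants from `default_weight.rs` above and `RocksDbWeight` for the storage costs (the sample inputs are arbitrary):

```rust
use frame_support::weights::{constants::RocksDbWeight as DbWeight, Weight};

/// Mirror of the generated formula, spelled out for readability.
fn heartbeat_weight(k: u32, e: u32) -> Weight {
    (139_830_000 as Weight)
        // per session key that has to be decoded
        .saturating_add((211_000 as Weight).saturating_mul(k as Weight))
        // per external address in the reported network state
        .saturating_add((654_000 as Weight).saturating_mul(e as Weight))
        // four storage reads, one write
        .saturating_add(DbWeight::get().reads(4))
        .saturating_add(DbWeight::get().writes(1))
}

fn main() {
    // e.g. 100 session keys and 2 reported external addresses
    println!("heartbeat weight: {}", heartbeat_weight(100, 2));
}
```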
[dev-dependencies] -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +pallet-balances = { version = "2.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/indices/README.md b/frame/indices/README.md new file mode 100644 index 0000000000000000000000000000000000000000..243392780db28e9e6b941a5bb1e7d3c79d78887d --- /dev/null +++ b/frame/indices/README.md @@ -0,0 +1,4 @@ +An index is a short form of an address. This module handles the allocation +of indices for newly created accounts. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/indices/src/benchmarking.rs b/frame/indices/src/benchmarking.rs index a6b543bb43f4abac986c537ddc1fedf258d27e54..382bf07f1136e8226d46f5fc3c5096de7776bc16 100644 --- a/frame/indices/src/benchmarking.rs +++ b/frame/indices/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use sp_runtime::traits::Bounded; use crate::Module as Indices; @@ -32,10 +32,8 @@ benchmarks! { _ { } claim { - // Index being claimed - let i in 0 .. 1000; - let account_index = T::AccountIndex::from(i); - let caller: T::AccountId = account("caller", 0, SEED); + let account_index = T::AccountIndex::from(SEED); + let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); }: _(RawOrigin::Signed(caller.clone()), account_index) verify { @@ -43,13 +41,11 @@ benchmarks! { } transfer { - // Index being claimed - let i in 0 .. 1000; - let account_index = T::AccountIndex::from(i); + let account_index = T::AccountIndex::from(SEED); // Setup accounts - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let recipient: T::AccountId = account("recipient", i, SEED); + let recipient: T::AccountId = account("recipient", 0, SEED); T::Currency::make_free_balance_be(&recipient, BalanceOf::::max_value()); // Claim the index Indices::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; @@ -59,11 +55,9 @@ benchmarks! { } free { - // Index being claimed - let i in 0 .. 1000; - let account_index = T::AccountIndex::from(i); + let account_index = T::AccountIndex::from(SEED); // Setup accounts - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Claim the index Indices::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; @@ -73,13 +67,11 @@ benchmarks! { } force_transfer { - // Index being claimed - let i in 0 .. 1000; - let account_index = T::AccountIndex::from(i); + let account_index = T::AccountIndex::from(SEED); // Setup accounts let original: T::AccountId = account("original", 0, SEED); T::Currency::make_free_balance_be(&original, BalanceOf::::max_value()); - let recipient: T::AccountId = account("recipient", i, SEED); + let recipient: T::AccountId = account("recipient", 0, SEED); T::Currency::make_free_balance_be(&recipient, BalanceOf::::max_value()); // Claim the index Indices::::claim(RawOrigin::Signed(original).into(), account_index)?; @@ -89,11 +81,9 @@ benchmarks! { } freeze { - // Index being claimed - let i in 0 ..
1000; - let account_index = T::AccountIndex::from(i); + let account_index = T::AccountIndex::from(SEED); // Setup accounts - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Claim the index Indices::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; diff --git a/frame/indices/src/default_weights.rs b/frame/indices/src/default_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..6b3b9c13e40a0c5de46377021aea56c3c2967e8e --- /dev/null +++ b/frame/indices/src/default_weights.rs @@ -0,0 +1,51 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn claim() -> Weight { + (56_237_000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn transfer() -> Weight { + (63_665_000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn free() -> Weight { + (50_736_000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn force_transfer() -> Weight { + (52_361_000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn freeze() -> Weight { + (46_483_000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } +} diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index c99beb463bc5f5df5244c081d29d00a9687047cd..edbaed17e536f16a6f6a2904a95f9af3955e4d1b 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -28,7 +28,7 @@ use sp_runtime::traits::{ use frame_support::{Parameter, decl_module, decl_error, decl_event, decl_storage, ensure}; use frame_support::dispatch::DispatchResult; use frame_support::traits::{Currency, ReservableCurrency, Get, BalanceStatus::Reserved}; -use frame_support::weights::{Weight, constants::WEIGHT_PER_MICROS}; +use frame_support::weights::Weight; use frame_system::{ensure_signed, ensure_root}; use self::address::Address as RawAddress; @@ -36,24 +36,17 @@ mod mock; pub mod address; mod tests; mod benchmarking; +mod default_weights; pub type Address = RawAddress<::AccountId, ::AccountIndex>; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; pub trait WeightInfo { - fn claim(i: u32, ) -> Weight; - fn transfer(i: u32, ) -> Weight; - fn free(i: u32, ) -> Weight; - fn force_transfer(i: u32, ) -> Weight; - fn freeze(i: u32, ) -> 
Weight; -} - -impl WeightInfo for () { - fn claim(_i: u32, ) -> Weight { 1_000_000_000 } - fn transfer(_i: u32, ) -> Weight { 1_000_000_000 } - fn free(_i: u32, ) -> Weight { 1_000_000_000 } - fn force_transfer(_i: u32, ) -> Weight { 1_000_000_000 } - fn freeze(_i: u32, ) -> Weight { 1_000_000_000 } + fn claim() -> Weight; + fn transfer() -> Weight; + fn free() -> Weight; + fn force_transfer() -> Weight; + fn freeze() -> Weight; } /// The module's config trait. @@ -95,11 +88,11 @@ decl_event!( ::AccountId, ::AccountIndex { - /// A account index was assigned. [who, index] + /// A account index was assigned. \[who, index\] IndexAssigned(AccountId, AccountIndex), - /// A account index has been freed up (unassigned). [index] + /// A account index has been freed up (unassigned). \[index\] IndexFreed(AccountIndex), - /// A account index has been frozen to its current account ID. [who, index] + /// A account index has been frozen to its current account ID. \[who, index\] IndexFrozen(AccountIndex, AccountId), } ); @@ -121,6 +114,9 @@ decl_error! { decl_module! { pub struct Module for enum Call where origin: T::Origin, system = frame_system { + /// The deposit needed for reserving an index. + const Deposit: BalanceOf = T::Deposit::get(); + fn deposit_event() = default; /// Assign an previously unassigned index. @@ -139,10 +135,9 @@ decl_module! { /// - One reserve operation. /// - One event. /// ------------------- - /// - Base Weight: 28.69 µs /// - DB Weight: 1 Read/Write (Accounts) /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) + 30 * WEIGHT_PER_MICROS] + #[weight = T::WeightInfo::claim()] fn claim(origin, index: T::AccountIndex) { let who = ensure_signed(origin)?; @@ -170,12 +165,11 @@ decl_module! { /// - One transfer operation. /// - One event. /// ------------------- - /// - Base Weight: 33.74 µs /// - DB Weight: /// - Reads: Indices Accounts, System Account (recipient) /// - Writes: Indices Accounts, System Account (recipient) /// # - #[weight = T::DbWeight::get().reads_writes(2, 2) + 35 * WEIGHT_PER_MICROS] + #[weight = T::WeightInfo::transfer()] fn transfer(origin, new: T::AccountId, index: T::AccountIndex) { let who = ensure_signed(origin)?; ensure!(who != new, Error::::NotTransfer); @@ -207,10 +201,9 @@ decl_module! { /// - One reserve operation. /// - One event. /// ------------------- - /// - Base Weight: 25.53 µs /// - DB Weight: 1 Read/Write (Accounts) /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) + 25 * WEIGHT_PER_MICROS] + #[weight = T::WeightInfo::free()] fn free(origin, index: T::AccountIndex) { let who = ensure_signed(origin)?; @@ -241,12 +234,11 @@ decl_module! { /// - Up to one reserve operation. /// - One event. /// ------------------- - /// - Base Weight: 26.83 µs /// - DB Weight: /// - Reads: Indices Accounts, System Account (original owner) /// - Writes: Indices Accounts, System Account (original owner) /// # - #[weight = T::DbWeight::get().reads_writes(2, 2) + 25 * WEIGHT_PER_MICROS] + #[weight = T::WeightInfo::force_transfer()] fn force_transfer(origin, new: T::AccountId, index: T::AccountIndex, freeze: bool) { ensure_root(origin)?; @@ -274,10 +266,9 @@ decl_module! { /// - Up to one slash operation. /// - One event. 
/// ------------------- - /// - Base Weight: 30.86 µs /// - DB Weight: 1 Read/Write (Accounts) /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) + 30 * WEIGHT_PER_MICROS] + #[weight = T::WeightInfo::freeze()] fn freeze(origin, index: T::AccountIndex) { let who = ensure_signed(origin)?; diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index 97e7a954f8f58876497edf1a2cc431ac56a6ec5a..cfbd2e38c3d3ff60c8e74d7f24a016cee77ad526 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -70,7 +70,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -82,6 +82,7 @@ parameter_types! { } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u64; type DustRemoval = (); type Event = MetaEvent; diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index fd138a97c7eac00fc488e6238890f1dc0c8e4bba..1cac5d38c5f1808b76f03236debe7c019f19e5f4 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-membership" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME membership management pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,14 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/membership/README.md b/frame/membership/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a769be497050e1c2f0fc6e2be2d218f6239fff02 --- /dev/null +++ b/frame/membership/README.md @@ -0,0 +1,6 @@ +# Membership Module + +Allows control of membership of a set of `AccountId`s, useful for managing membership of a +collective. A prime member may be set.
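As a quick illustration of that membership set in action, a minimal sketch in the style of this pallet's own tests; the `Test` mock runtime, `new_test_ext`, and the account permitted by `AddOrigin` (signed origin 1 below) are assumptions here, not part of the diff:

```rust
// Illustrative only: mock runtime items (`Test`, `new_test_ext`, the configured
// `AddOrigin`) are assumed from the pallet's test setup.
#[test]
fn membership_sketch() {
    new_test_ext().execute_with(|| {
        // The configured `AddOrigin` may grow the set of members.
        assert_ok!(Membership::add_member(Origin::signed(1), 15));
        assert!(Membership::members().contains(&15));
    });
}
```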
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 2bc4a440b8d475b9b4aa6c34db37922fcb72fffa..492fda88dd170ffbee0194d4c21c29d2fc9a76f1 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -320,7 +320,7 @@ mod tests { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); diff --git a/frame/metadata/Cargo.toml b/frame/metadata/Cargo.toml index be508ef2c04ca3473bfbf3509a02569f53720dca..2934b15562c4354d3bee0de82d636579867bfdc3 100644 --- a/frame/metadata/Cargo.toml +++ b/frame/metadata/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "frame-metadata" -version = "11.0.0-rc5" +version = "12.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Decodable variant of the RuntimeMetadata." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,8 +15,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/metadata/README.md b/frame/metadata/README.md new file mode 100644 index 0000000000000000000000000000000000000000..423af8602e3f013fa4632e62da9399bd1901806e --- /dev/null +++ b/frame/metadata/README.md @@ -0,0 +1,7 @@ +Decodable variant of the RuntimeMetadata. + +This really doesn't belong here, but is necessary for the moment. In the future +it should be removed entirely to an external module for shimming on to the +codec-encoded metadata. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/metadata/src/lib.rs b/frame/metadata/src/lib.rs index c0eeb76b6f97625179caa47bb08b7232ea1835ca..109f33f42019179a4d7c1ed5c4615a7705dc7ec8 100644 --- a/frame/metadata/src/lib.rs +++ b/frame/metadata/src/lib.rs @@ -362,8 +362,10 @@ pub enum RuntimeMetadata { V9(RuntimeMetadataDeprecated), /// Version 10 for runtime metadata. No longer used. V10(RuntimeMetadataDeprecated), - /// Version 11 for runtime metadata. - V11(RuntimeMetadataV11), + /// Version 11 for runtime metadata. No longer used. + V11(RuntimeMetadataDeprecated), + /// Version 12 for runtime metadata. + V12(RuntimeMetadataV12), } /// Enum that should fail. @@ -387,7 +389,7 @@ impl Decode for RuntimeMetadataDeprecated { /// The metadata of a runtime. #[derive(Eq, Encode, PartialEq, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub struct RuntimeMetadataV11 { +pub struct RuntimeMetadataV12 { /// Metadata of all the modules. pub modules: DecodeDifferentArray, /// Metadata of the extrinsic. @@ -395,7 +397,7 @@ pub struct RuntimeMetadataV11 { } /// The latest version of the metadata. 
-pub type RuntimeMetadataLastVersion = RuntimeMetadataV11; +pub type RuntimeMetadataLastVersion = RuntimeMetadataV12; /// All metadata about a runtime module. #[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] @@ -407,6 +409,9 @@ pub struct ModuleMetadata { pub event: ODFnA, pub constants: DFnA, pub errors: DFnA, + /// Defines the index of the module; this index is used for the encoding of module event, + /// call and origin variants. + pub index: u8, } type ODFnA = Option>; @@ -420,6 +425,6 @@ impl Into for RuntimeMetadataPrefixed { impl Into for RuntimeMetadataLastVersion { fn into(self) -> RuntimeMetadataPrefixed { - RuntimeMetadataPrefixed(META_RESERVED, RuntimeMetadata::V11(self)) + RuntimeMetadataPrefixed(META_RESERVED, RuntimeMetadata::V12(self)) } } diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index aae3646644d0a40af4626ef9c92ab7cceac5d5c5..2be66ebb722c19fe334c11e362cfc095efcd4a62 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-multisig" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME multi-signature dispatch pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,18 +15,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/multisig/README.md b/frame/multisig/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2209e876f844123bedab34e39f897b32fc5e3c0b --- /dev/null +++ b/frame/multisig/README.md @@ -0,0 +1,29 @@ +# Multisig Module +A module for doing multisig dispatch.
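Before the details in the Overview and Interface sections that follow, a minimal sketch of the basic flow in the style of this pallet's tests: derive the deterministic shared account for a set of signatories and a threshold, fund it, then gather approvals until the call dispatches. The mock runtime items (`Test`, `new_test_ext`, `Balances`, `BalancesCall`) and the encoded call are assumptions, not part of the diff:

```rust
// Illustrative only: a 2-of-3 multisig between accounts 1, 2 and 3.
#[test]
fn multisig_flow_sketch() {
    new_test_ext().execute_with(|| {
        // The shared origin is derived purely from the signatories and the threshold.
        let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2);
        // It is an ordinary account id: fund it so the dispatched call can be paid for.
        assert_ok!(Balances::transfer(Origin::signed(1), multi, 5));

        // The first signatory opens the operation with the encoded call...
        let call = Call::Balances(BalancesCall::transfer(6, 5)).encode();
        let timepoint = Multisig::timepoint();
        assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone(), false, 0));
        // ...and the second approval, referencing the recorded timepoint, dispatches it.
        assert_ok!(Multisig::as_multi(
            Origin::signed(2), 2, vec![1, 3], Some(timepoint), call, false, Weight::max_value()
        ));
    });
}
```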
+ +- [`multisig::Trait`](https://docs.rs/pallet-multisig/latest/pallet_multisig/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-multisig/latest/pallet_multisig/enum.Call.html) + +## Overview + +This module contains functionality for multi-signature dispatch, a (potentially) stateful +operation, allowing multiple signed +origins (accounts) to coordinate and dispatch a call from a well-known origin, derivable +deterministically from the set of account IDs and the threshold number of accounts from the +set that must approve it. In the case that the threshold is just one then this is a stateless +operation. This is useful for multisig wallets where cryptographic threshold signatures are +not available or desired. + +## Interface + +### Dispatchable Functions + +* `as_multi` - Approve and if possible dispatch a call from a composite origin formed from a + number of signed origins. +* `approve_as_multi` - Approve a call from a composite origin. +* `cancel_as_multi` - Cancel a call from a composite origin. + +[`Call`]: ./enum.Call.html +[`Trait`]: ./trait.Trait.html + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index 8113d179cd1576c43dfb0418dbcf94327f63b44b..bf89ec8b09bd4772a144444b9bd5d52558603c48 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -59,6 +59,9 @@ benchmarks! { let call_hash = call.using_encoded(blake2_256); let multi_account_id = Multisig::::multi_account_id(&signatories, 1); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller.clone()), signatories, Box::new(call)) verify { // If the benchmark resolves, then the call was dispatched successfully. @@ -73,9 +76,13 @@ benchmarks! { let call_hash = blake2_256(&call); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, false, 0) verify { assert!(Multisigs::::contains_key(multi_account_id, call_hash)); + assert!(!Calls::::contains_key(call_hash)); } as_multi_create_store { @@ -88,6 +95,9 @@ benchmarks! { let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, true, 0) verify { assert!(Multisigs::::contains_key(multi_account_id, call_hash)); @@ -108,13 +118,43 @@ benchmarks! 
{ let timepoint = Multisig::::timepoint(); // Create the multi, storing for worst case Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, 0)?; + assert!(Calls::::contains_key(call_hash)); let caller2 = signatories2.remove(0); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller2); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, 0) verify { let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; assert_eq!(multisig.approvals.len(), 2); } + as_multi_approve_store { + // Signatories, need at least 3 people (so we don't complete the multisig) + let s in 3 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let call_hash = blake2_256(&call); + let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); + let mut signatories2 = signatories.clone(); + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + // before the call, get the timepoint + let timepoint = Multisig::::timepoint(); + // Create the multi, not storing + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), false, 0)?; + assert!(!Calls::::contains_key(call_hash)); + let caller2 = signatories2.remove(0); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller2); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, true, 0) + verify { + let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; + assert_eq!(multisig.approvals.len(), 2); + assert!(Calls::::contains_key(call_hash)); + } + as_multi_complete { // Signatories, need at least 2 people let s in 2 .. T::MaxSignatories::get() as u32; @@ -138,6 +178,9 @@ benchmarks! { } let caller2 = signatories2.remove(0); assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller2); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, Weight::max_value()) verify { assert!(!Multisigs::::contains_key(&multi_account_id, call_hash)); @@ -146,12 +189,15 @@ benchmarks! { approve_as_multi_create { // Signatories, need at least 2 people let s in 2 .. T::MaxSignatories::get() as u32; - // Transaction Length - let z in 0 .. 10_000; + // Transaction Length, not a component + let z = 10_000; let (mut signatories, call) = setup_multi::(s, z)?; let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; let call_hash = blake2_256(&call); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); // Create the multi }: approve_as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call_hash, 0) verify { @@ -161,8 +207,8 @@ benchmarks! 
{ approve_as_multi_approve { // Signatories, need at least 2 people let s in 2 .. T::MaxSignatories::get() as u32; - // Transaction Length - let z in 0 .. 10_000; + // Transaction Length, not a component + let z = 10_000; let (mut signatories, call) = setup_multi::(s, z)?; let mut signatories2 = signatories.clone(); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); @@ -181,6 +227,9 @@ benchmarks! { 0 )?; let caller2 = signatories2.remove(0); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller2); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: approve_as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call_hash, 0) verify { let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; @@ -190,8 +239,8 @@ benchmarks! { approve_as_multi_complete { // Signatories, need at least 2 people let s in 2 .. T::MaxSignatories::get() as u32; - // Transaction Length - let z in 0 .. 10_000; + // Transaction Length, not a component + let z = 10_000; let (mut signatories, call) = setup_multi::(s, z)?; let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let mut signatories2 = signatories.clone(); @@ -211,6 +260,9 @@ benchmarks! { } let caller2 = signatories2.remove(0); assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller2); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: approve_as_multi( RawOrigin::Signed(caller2), s as u16, @@ -226,8 +278,8 @@ benchmarks! { cancel_as_multi { // Signatories, need at least 2 people let s in 2 .. T::MaxSignatories::get() as u32; - // Transaction Length - let z in 0 .. 10_000; + // Transaction Length, not a component + let z = 10_000; let (mut signatories, call) = setup_multi::(s, z)?; let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; @@ -237,30 +289,13 @@ benchmarks! { let o = RawOrigin::Signed(caller.clone()).into(); Multisig::::as_multi(o, s as u16, signatories.clone(), None, call.clone(), true, 0)?; assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); + assert!(Calls::::contains_key(call_hash)); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), s as u16, signatories, timepoint, call_hash) verify { assert!(!Multisigs::::contains_key(multi_account_id, call_hash)); - } - - cancel_as_multi_store { - // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get() as u32; - // Transaction Length - let z in 0 .. 
10_000; - let (mut signatories, call) = setup_multi::(s, z)?; - let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); - let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let call_hash = blake2_256(&call); - let timepoint = Multisig::::timepoint(); - // Create the multi - let o = RawOrigin::Signed(caller.clone()).into(); - Multisig::::as_multi(o, s as u16, signatories.clone(), None, call.clone(), true, 0)?; - assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); - assert!(Calls::::contains_key(call_hash)); - }: cancel_as_multi(RawOrigin::Signed(caller), s as u16, signatories, timepoint, call_hash) - verify { - assert!(!Multisigs::::contains_key(&multi_account_id, call_hash)); assert!(!Calls::::contains_key(call_hash)); } } @@ -278,12 +313,12 @@ mod tests { assert_ok!(test_benchmark_as_multi_create::()); assert_ok!(test_benchmark_as_multi_create_store::()); assert_ok!(test_benchmark_as_multi_approve::()); + assert_ok!(test_benchmark_as_multi_approve_store::()); assert_ok!(test_benchmark_as_multi_complete::()); assert_ok!(test_benchmark_approve_as_multi_create::()); assert_ok!(test_benchmark_approve_as_multi_approve::()); assert_ok!(test_benchmark_approve_as_multi_complete::()); assert_ok!(test_benchmark_cancel_as_multi::()); - assert_ok!(test_benchmark_cancel_as_multi_store::()); }); } } diff --git a/frame/multisig/src/default_weights.rs b/frame/multisig/src/default_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..19d1528d9aaa6c2fd2e6e9acdb5ccde6db402ebb --- /dev/null +++ b/frame/multisig/src/default_weights.rs @@ -0,0 +1,89 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn as_multi_threshold_1(z: u32, ) -> Weight { + (17_161_000 as Weight) + .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) + } + fn as_multi_create(s: u32, z: u32, ) -> Weight { + (79_857_000 as Weight) + .saturating_add((131_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn as_multi_create_store(s: u32, z: u32, ) -> Weight { + (90_218_000 as Weight) + .saturating_add((129_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn as_multi_approve(s: u32, z: u32, ) -> Weight { + (48_402_000 as Weight) + .saturating_add((132_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { + (88_390_000 as Weight) + .saturating_add((120_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn as_multi_complete(s: u32, z: u32, ) -> Weight { + (98_960_000 as Weight) + .saturating_add((276_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((6_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn approve_as_multi_create(s: u32, ) -> Weight { + (80_185_000 as Weight) + .saturating_add((121_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn approve_as_multi_approve(s: u32, ) -> Weight { + (48_386_000 as Weight) + .saturating_add((143_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn approve_as_multi_complete(s: u32, ) -> Weight { + (177_181_000 as Weight) + .saturating_add((273_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn cancel_as_multi(s: u32, ) -> Weight { + (126_334_000 as Weight) + .saturating_add((124_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index f8f6e8ed63bc98a49464e01a4263abeb775f007c..1ce4e06a6e1181eebaaae2ac84ce728ae8ab615f 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -51,7 +51,7 @@ use codec::{Encode, Decode}; use sp_io::hashing::blake2_256; use frame_support::{decl_module, decl_event, decl_error, decl_storage, Parameter, ensure, RuntimeDebug}; use frame_support::{traits::{Get, ReservableCurrency, Currency}, - weights::{Weight, GetDispatchInfo, 
constants::{WEIGHT_PER_NANOS, WEIGHT_PER_MICROS}}, + weights::{Weight, GetDispatchInfo}, dispatch::{DispatchResultWithPostInfo, DispatchErrorWithPostInfo, PostDispatchInfo}, }; use frame_system::{self as system, ensure_signed, RawOrigin}; @@ -59,6 +59,7 @@ use sp_runtime::{DispatchError, DispatchResult, traits::{Dispatchable, Zero}}; mod tests; mod benchmarking; +mod default_weights; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// Just a bunch of bytes, but they should decode to a valid `Call`. @@ -69,25 +70,12 @@ pub trait WeightInfo { fn as_multi_create(s: u32, z: u32, ) -> Weight; fn as_multi_create_store(s: u32, z: u32, ) -> Weight; fn as_multi_approve(s: u32, z: u32, ) -> Weight; + fn as_multi_approve_store(s: u32, z: u32, ) -> Weight; fn as_multi_complete(s: u32, z: u32, ) -> Weight; - fn approve_as_multi_create(s: u32, z: u32, ) -> Weight; - fn approve_as_multi_approve(s: u32, z: u32, ) -> Weight; - fn approve_as_multi_complete(s: u32, z: u32, ) -> Weight; - fn cancel_as_multi(s: u32, z: u32, ) -> Weight; - fn cancel_as_multi_store(s: u32, z: u32, ) -> Weight; -} - -impl WeightInfo for () { - fn as_multi_threshold_1(_z: u32, ) -> Weight { 1_000_000_000 } - fn as_multi_create(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } - fn as_multi_create_store(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } - fn as_multi_approve(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } - fn as_multi_complete(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } - fn approve_as_multi_create(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } - fn approve_as_multi_approve(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } - fn approve_as_multi_complete(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } - fn cancel_as_multi(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } - fn cancel_as_multi_store(_s: u32, _z: u32, ) -> Weight { 1_000_000_000 } + fn approve_as_multi_create(s: u32, ) -> Weight; + fn approve_as_multi_approve(s: u32, ) -> Weight; + fn approve_as_multi_complete(s: u32, ) -> Weight; + fn cancel_as_multi(s: u32, ) -> Weight; } /// Configuration trait. @@ -197,59 +185,18 @@ decl_event! { BlockNumber = ::BlockNumber, CallHash = [u8; 32] { - /// A new multisig operation has begun. [approving, multisig, call_hash] + /// A new multisig operation has begun. \[approving, multisig, call_hash\] NewMultisig(AccountId, AccountId, CallHash), - /// A multisig operation has been approved by someone. [approving, timepoint, multisig, call_hash] + /// A multisig operation has been approved by someone. + /// \[approving, timepoint, multisig, call_hash\] MultisigApproval(AccountId, Timepoint, AccountId, CallHash), - /// A multisig operation has been executed. [approving, timepoint, multisig, call_hash] + /// A multisig operation has been executed. \[approving, timepoint, multisig, call_hash\] MultisigExecuted(AccountId, Timepoint, AccountId, CallHash, DispatchResult), - /// A multisig operation has been cancelled. [cancelling, timepoint, multisig, call_hash] + /// A multisig operation has been cancelled. 
\[cancelling, timepoint, multisig, call_hash\] MultisigCancelled(AccountId, Timepoint, AccountId, CallHash), } } -mod weight_of { - use super::*; - - /// - Base Weight: 33.72 + 0.002 * Z µs - /// - DB Weight: None - /// - Plus Call Weight - pub fn as_multi_threshold_1( - call_len: usize, - call_weight: Weight, - ) -> Weight { - (34 * WEIGHT_PER_MICROS) - .saturating_add((2 * WEIGHT_PER_NANOS).saturating_mul(call_len as Weight)) - .saturating_add(call_weight) - } - - /// - Base Weight: - /// - Create: 38.82 + 0.121 * S + .001 * Z µs - /// - Create w/ Store: 54.22 + 0.120 * S + .003 * Z µs - /// - Approve: 29.86 + 0.143 * S + .001 * Z µs - /// - Complete: 39.55 + 0.267 * S + .002 * Z µs - /// - DB Weight: - /// - Reads: Multisig Storage, [Caller Account], Calls, Depositor Account - /// - Writes: Multisig Storage, [Caller Account], Calls, Depositor Account - /// - Plus Call Weight - pub fn as_multi( - sig_len: usize, - call_len: usize, - call_weight: Weight, - calls_write: bool, - refunded: bool, - ) -> Weight { - call_weight - .saturating_add(55 * WEIGHT_PER_MICROS) - .saturating_add((250 * WEIGHT_PER_NANOS).saturating_mul(sig_len as Weight)) - .saturating_add((3 * WEIGHT_PER_NANOS).saturating_mul(call_len as Weight)) - .saturating_add(T::DbWeight::get().reads_writes(1, 1)) // Multisig read/write - .saturating_add(T::DbWeight::get().reads(1)) // Calls read - .saturating_add(T::DbWeight::get().writes(calls_write.into())) // Calls write - .saturating_add(T::DbWeight::get().reads_writes(refunded.into(), refunded.into())) // Deposit refunded - } -} - enum CallOrHash { Call(OpaqueCall, bool), Hash([u8; 32]), @@ -262,6 +209,16 @@ decl_module! { /// Deposit one of this module's events by using the default implementation. fn deposit_event() = default; + /// The base amount of currency needed to reserve for creating a multisig execution or to store + /// a dispatch call for later. + const DepositBase: BalanceOf = T::DepositBase::get(); + + /// The amount of currency needed per unit threshold when creating a multisig execution. + const DepositFactor: BalanceOf = T::DepositFactor::get(); + + /// The maximum amount of signatories allowed for a given multisig. + const MaxSignatories: u16 = T::MaxSignatories::get(); + /// Immediately dispatch a multi-signature call using a single approval from the caller. /// /// The dispatch origin for this call must be _Signed_. @@ -275,14 +232,12 @@ decl_module! { /// # /// O(Z + C) where Z is the length of the call and C its execution weight. /// ------------------------------- - /// - Base Weight: 33.72 + 0.002 * Z µs /// - DB Weight: None /// - Plus Call Weight /// # #[weight = ( - weight_of::as_multi_threshold_1::( - call.using_encoded(|c| c.len()), - call.get_dispatch_info().weight + T::WeightInfo::as_multi_threshold_1(call.using_encoded(|c| c.len() as u32)) + .saturating_add(call.get_dispatch_info().weight ), call.get_dispatch_info().class, )] @@ -303,17 +258,14 @@ decl_module! 
{ let result = call.dispatch(RawOrigin::Signed(id).into()); result.map(|post_dispatch_info| post_dispatch_info.actual_weight - .map(|actual_weight| weight_of::as_multi_threshold_1::( - call_len, - actual_weight, - )) - .into() + .map(|actual_weight| + T::WeightInfo::as_multi_threshold_1(call_len as u32) + .saturating_add(actual_weight) + ).into() ).map_err(|err| match err.post_info.actual_weight { Some(actual_weight) => { - let weight_used = weight_of::as_multi_threshold_1::( - call_len, - actual_weight, - ); + let weight_used = T::WeightInfo::as_multi_threshold_1(call_len as u32) + .saturating_add(actual_weight); let post_info = Some(weight_used).into(); let error = err.error.into(); DispatchErrorWithPostInfo { post_info, error } @@ -363,23 +315,21 @@ decl_module! { /// deposit taken for its lifetime of /// `DepositBase + threshold * DepositFactor`. /// ------------------------------- - /// - Base Weight: - /// - Create: 41.89 + 0.118 * S + .002 * Z µs - /// - Create w/ Store: 53.57 + 0.119 * S + .003 * Z µs - /// - Approve: 31.39 + 0.136 * S + .002 * Z µs - /// - Complete: 39.94 + 0.26 * S + .002 * Z µs /// - DB Weight: /// - Reads: Multisig Storage, [Caller Account], Calls (if `store_call`) /// - Writes: Multisig Storage, [Caller Account], Calls (if `store_call`) /// - Plus Call Weight /// # - #[weight = weight_of::as_multi::( - other_signatories.len(), - call.len(), - *max_weight, - true, // assume worst case: calls write - true, // assume worst case: refunded - )] + #[weight = { + let s = other_signatories.len() as u32; + let z = call.len() as u32; + + T::WeightInfo::as_multi_create(s, z) + .max(T::WeightInfo::as_multi_create_store(s, z)) + .max(T::WeightInfo::as_multi_approve(s, z)) + .max(T::WeightInfo::as_multi_complete(s, z)) + .saturating_add(*max_weight) + }] fn as_multi(origin, threshold: u16, other_signatories: Vec, @@ -424,20 +374,18 @@ decl_module! { /// deposit taken for its lifetime of /// `DepositBase + threshold * DepositFactor`. /// ---------------------------------- - /// - Base Weight: - /// - Create: 44.71 + 0.088 * S - /// - Approve: 31.48 + 0.116 * S /// - DB Weight: /// - Read: Multisig Storage, [Caller Account] /// - Write: Multisig Storage, [Caller Account] /// # - #[weight = weight_of::as_multi::( - other_signatories.len(), - 0, // call_len is zero in this case - *max_weight, - true, // assume worst case: calls write - true, // assume worst case: refunded - )] + #[weight = { + let s = other_signatories.len() as u32; + + T::WeightInfo::approve_as_multi_create(s) + .max(T::WeightInfo::approve_as_multi_approve(s)) + .max(T::WeightInfo::approve_as_multi_complete(s)) + .saturating_add(*max_weight) + }] fn approve_as_multi(origin, threshold: u16, other_signatories: Vec, @@ -471,15 +419,11 @@ decl_module! { /// - I/O: 1 read `O(S)`, one remove. /// - Storage: removes one item. 
/// ---------------------------------- - /// - Base Weight: 36.07 + 0.124 * S /// - DB Weight: /// - Read: Multisig Storage, [Caller Account], Refund Account, Calls /// - Write: Multisig Storage, [Caller Account], Refund Account, Calls /// # - #[weight = T::DbWeight::get().reads_writes(3, 3) - .saturating_add(36 * WEIGHT_PER_MICROS) - .saturating_add((other_signatories.len() as Weight).saturating_mul(100 * WEIGHT_PER_NANOS)) - ] + #[weight = T::WeightInfo::cancel_as_multi(other_signatories.len() as u32)] fn cancel_as_multi(origin, threshold: u16, other_signatories: Vec, @@ -565,7 +509,7 @@ impl Module { Self::get_call(&call_hash, maybe_call.as_ref().map(|c| c.as_ref())) } else { None }; - if let Some(call) = maybe_approved_call { + if let Some((call, call_len)) = maybe_approved_call { // verify weight ensure!(call.get_dispatch_info().weight <= max_weight, Error::::WeightTooLow); @@ -579,13 +523,12 @@ impl Module { Self::deposit_event(RawEvent::MultisigExecuted( who, timepoint, id, call_hash, result.map(|_| ()).map_err(|e| e.error) )); - Ok(get_result_weight(result).map(|actual_weight| weight_of::as_multi::( - other_signatories_len, - call_len, - actual_weight, - true, // Call is removed - true, // User is refunded - )).into()) + Ok(get_result_weight(result).map(|actual_weight| + T::WeightInfo::as_multi_complete( + other_signatories_len as u32, + call_len as u32 + ).saturating_add(actual_weight) + ).into()) } else { // We cannot dispatch the call now; either it isn't available, or it is, but we // don't have threshold approvals even with our signature. @@ -609,14 +552,19 @@ impl Module { ensure!(stored, Error::::AlreadyApproved); } + let final_weight = if stored { + T::WeightInfo::as_multi_approve_store( + other_signatories_len as u32, + call_len as u32, + ) + } else { + T::WeightInfo::as_multi_approve( + other_signatories_len as u32, + call_len as u32, + ) + }; // Call is not made, so the actual weight does not include call - Ok(Some(weight_of::as_multi::( - other_signatories_len, - call_len, - 0, - stored, // Call stored? - false, // No refund - )).into()) + Ok(Some(final_weight).into()) } } else { // Not yet started; there should be no timepoint given. @@ -641,14 +589,20 @@ impl Module { approvals: vec![who.clone()], }); Self::deposit_event(RawEvent::NewMultisig(who, id, call_hash)); - // Call is not made, so we can return that weight - return Ok(Some(weight_of::as_multi::( - other_signatories_len, - call_len, - 0, - stored, // Call stored? - false, // No refund - )).into()) + + let final_weight = if stored { + T::WeightInfo::as_multi_create_store( + other_signatories_len as u32, + call_len as u32, + ) + } else { + T::WeightInfo::as_multi_create( + other_signatories_len as u32, + call_len as u32, + ) + }; + // Call is not made, so the actual weight does not include call + Ok(Some(final_weight).into()) } } @@ -672,13 +626,13 @@ impl Module { } /// Attempt to decode and return the call, provided by the user or from storage. 
- fn get_call(hash: &[u8; 32], maybe_known: Option<&[u8]>) -> Option<::Call> { + fn get_call(hash: &[u8; 32], maybe_known: Option<&[u8]>) -> Option<(::Call, usize)> { maybe_known.map_or_else(|| { Calls::::get(hash).and_then(|(data, ..)| { - Decode::decode(&mut &data[..]).ok() + Decode::decode(&mut &data[..]).ok().map(|d| (d, data.len())) }) }, |data| { - Decode::decode(&mut &data[..]).ok() + Decode::decode(&mut &data[..]).ok().map(|d| (d, data.len())) }) } diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index 888dcecb3a8fd9cd1a8e8f60ede7f644828d0093..ca15e04597eaafff1779107183ac974012c68040 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -80,7 +80,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -90,6 +90,7 @@ parameter_types! { pub const ExistentialDeposit: u64 = 1; } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u64; type Event = TestEvent; type DustRemoval = (); diff --git a/frame/nicks/Cargo.toml b/frame/nicks/Cargo.toml index e63ed2c600d8547b4d7e0613e05afa7f9fc671c3..8f348d665b7e3891734136685c5e0d57a1b03ebe 100644 --- a/frame/nicks/Cargo.toml +++ b/frame/nicks/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-nicks" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for nick management" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,15 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/nicks/README.md b/frame/nicks/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b4c88eff43152162ba68ca08d7b794617d8b9c24 --- /dev/null +++ b/frame/nicks/README.md @@ -0,0 +1,25 @@ +# Nicks Module + +- 
[`nicks::Trait`](https://docs.rs/pallet-nicks/latest/pallet_nicks/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-nicks/latest/pallet_nicks/enum.Call.html) + +## Overview + +Nicks is an example module for keeping track of account names on-chain. It makes no effort to +create a name hierarchy, be a DNS replacement or provide reverse lookups. Furthermore, the +weights attached to this module's dispatchable functions are for demonstration purposes only and +have not been designed to be economically secure. Do not use this pallet as-is in production. + +## Interface + +### Dispatchable Functions + +* `set_name` - Set the associated name of an account; a small deposit is reserved if not already + taken. +* `clear_name` - Remove an account's associated name; the deposit is returned. +* `kill_name` - Forcibly remove the associated name; the deposit is lost. + +[`Call`]: ./enum.Call.html +[`Trait`]: ./trait.Trait.html + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 56262819c9654cb471af72bb22b4641c257bd03b..ddeadfb7680fef1b27217f5e65bc394c808acdf8 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -22,8 +22,10 @@ //! //! ## Overview //! -//! Nicks is a trivial module for keeping track of account names on-chain. It makes no effort to -//! create a name hierarchy, be a DNS replacement or provide reverse lookups. +//! Nicks is an example module for keeping track of account names on-chain. It makes no effort to +//! create a name hierarchy, be a DNS replacement or provide reverse lookups. Furthermore, the +//! weights attached to this module's dispatchable functions are for demonstration purposes only and +//! have not been designed to be economically secure. Do not use this pallet as-is in production. //! //! ## Interface //! @@ -84,15 +86,15 @@ decl_storage! { decl_event!( pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { - /// A name was set. [who] + /// A name was set. \[who\] NameSet(AccountId), - /// A name was forcibly set. [target] + /// A name was forcibly set. \[target\] NameForced(AccountId), - /// A name was changed. [who] + /// A name was changed. \[who\] NameChanged(AccountId), - /// A name was cleared, and the given balance returned. [who, deposit] + /// A name was cleared, and the given balance returned. \[who, deposit\] NameCleared(AccountId, Balance), - /// A name was removed and the given balance slashed. [target, deposit] + /// A name was removed and the given balance slashed. \[target, deposit\] NameKilled(AccountId, Balance), } ); @@ -149,12 +151,12 @@ decl_module! 
{ ensure!(name.len() <= T::MaxLength::get(), Error::::TooLong); let deposit = if let Some((_, deposit)) = >::get(&sender) { - Self::deposit_event(RawEvent::NameSet(sender.clone())); + Self::deposit_event(RawEvent::NameChanged(sender.clone())); deposit } else { let deposit = T::ReservationFee::get(); T::Currency::reserve(&sender, deposit.clone())?; - Self::deposit_event(RawEvent::NameChanged(sender.clone())); + Self::deposit_event(RawEvent::NameSet(sender.clone())); deposit }; @@ -281,7 +283,7 @@ mod tests { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -291,6 +293,7 @@ mod tests { pub const ExistentialDeposit: u64 = 1; } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u64; type Event = (); type DustRemoval = (); diff --git a/frame/node-authorization/Cargo.toml b/frame/node-authorization/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..1448e99bd2a1461926b921be7dd2dbed450d4c1a --- /dev/null +++ b/frame/node-authorization/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "pallet-node-authorization" +version = "2.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for node authorization" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +serde = { version = "1.0.101", optional = true } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } + +[features] +default = ["std"] +std = [ + "serde", + "codec/std", + "frame-support/std", + "frame-system/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..91f89ad1d91001355e6bd4ac7afa0194ed8fc2b5 --- /dev/null +++ b/frame/node-authorization/src/lib.rs @@ -0,0 +1,861 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Node authorization pallet +//! +//! 
This pallet manages a configurable set of nodes for a permissioned network. +//! Each node is identified by a PeerId (i.e. Vec<u8>). It provides two ways to +//! authorize a node, +//! +//! - a set of well known nodes across different organizations in which the +//! connections are allowed. +//! - users can claim the ownership for each node, then manage the connections of +//! the node. +//! +//! A node must have an owner. The owner can additionally change the connections +//! for the node. Only one user is allowed to claim a specific node. To eliminate +//! false claims, the maintainer of the node should claim it before even starting the +//! node. This pallet uses an offchain worker to set reserved nodes; if the node is not +//! an authority, make sure to enable the offchain worker with the right CLI flag. The +//! node may lag behind the latest block, in which case you need to disable the offchain +//! worker and manually set reserved nodes when starting it. + +// Ensure we're `no_std` when compiling for Wasm. +#![cfg_attr(not(feature = "std"), no_std)] + +use sp_core::OpaquePeerId as PeerId; +use sp_std::{ + collections::btree_set::BTreeSet, + iter::FromIterator, + prelude::*, +}; +use codec::Decode; +use frame_support::{ + decl_module, decl_storage, decl_event, decl_error, + debug, ensure, + weights::{DispatchClass, Weight}, + traits::{Get, EnsureOrigin}, +}; +use frame_system::ensure_signed; + +pub trait WeightInfo { + fn add_well_known_node() -> Weight; + fn remove_well_known_node() -> Weight; + fn swap_well_known_node() -> Weight; + fn reset_well_known_nodes() -> Weight; + fn claim_node() -> Weight; + fn remove_claim() -> Weight; + fn transfer_node() -> Weight; + fn add_connections() -> Weight; + fn remove_connections() -> Weight; +} + +impl WeightInfo for () { + fn add_well_known_node() -> Weight { 50_000_000 } + fn remove_well_known_node() -> Weight { 50_000_000 } + fn swap_well_known_node() -> Weight { 50_000_000 } + fn reset_well_known_nodes() -> Weight { 50_000_000 } + fn claim_node() -> Weight { 50_000_000 } + fn remove_claim() -> Weight { 50_000_000 } + fn transfer_node() -> Weight { 50_000_000 } + fn add_connections() -> Weight { 50_000_000 } + fn remove_connections() -> Weight { 50_000_000 } +} + +pub trait Trait: frame_system::Trait { + /// The event type of this module. + type Event: From<Event<Self>> + Into<<Self as frame_system::Trait>::Event>; + + /// The maximum number of well known nodes that are allowed to be set + type MaxWellKnownNodes: Get<u32>; + + /// The maximum length in bytes of PeerId + type MaxPeerIdLength: Get<u32>; + + /// The origin which can add a well known node. + type AddOrigin: EnsureOrigin<Self::Origin>; + + /// The origin which can remove a well known node. + type RemoveOrigin: EnsureOrigin<Self::Origin>; + + /// The origin which can swap the well known nodes. + type SwapOrigin: EnsureOrigin<Self::Origin>; + + /// The origin which can reset the well known nodes. + type ResetOrigin: EnsureOrigin<Self::Origin>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; +} + +decl_storage! { + trait Store for Module<T: Trait> as NodeAuthorization { + /// The set of well known nodes. This is stored sorted (just by value). + pub WellKnownNodes get(fn well_known_nodes): BTreeSet<PeerId>; + /// A map that maintains the ownership of each node. + pub Owners get(fn owners): + map hasher(blake2_128_concat) PeerId => T::AccountId; + /// The additional adaptive connections of each node. 
+ pub AdditionalConnections get(fn additional_connection): + map hasher(blake2_128_concat) PeerId => BTreeSet; + } + add_extra_genesis { + config(nodes): Vec<(PeerId, T::AccountId)>; + build(|config: &GenesisConfig| { + >::initialize_nodes(&config.nodes) + }) + } +} + +decl_event! { + pub enum Event where + ::AccountId, + { + /// The given well known node was added. + NodeAdded(PeerId, AccountId), + /// The given well known node was removed. + NodeRemoved(PeerId), + /// The given well known node was swapped; first item was removed, + /// the latter was added. + NodeSwapped(PeerId, PeerId), + /// The given well known nodes were reset. + NodesReset(Vec<(PeerId, AccountId)>), + /// The given node was claimed by a user. + NodeClaimed(PeerId, AccountId), + /// The given claim was removed by its owner. + ClaimRemoved(PeerId, AccountId), + /// The node was transferred to another account. + NodeTransferred(PeerId, AccountId), + /// The allowed connections were added to a node. + ConnectionsAdded(PeerId, Vec), + /// The allowed connections were removed from a node. + ConnectionsRemoved(PeerId, Vec), + } +} + +decl_error! { + /// Error for the node authorization module. + pub enum Error for Module { + /// The PeerId is too long. + PeerIdTooLong, + /// Too many well known nodes. + TooManyNodes, + /// The node is already joined in the list. + AlreadyJoined, + /// The node doesn't exist in the list. + NotExist, + /// The node is already claimed by a user. + AlreadyClaimed, + /// The node hasn't been claimed yet. + NotClaimed, + /// You are not the owner of the node. + NotOwner, + /// No permisson to perform specific operation. + PermissionDenied, + } +} + +decl_module! { + pub struct Module for enum Call where origin: T::Origin { + /// The maximum number of authorized well known nodes + const MaxWellKnownNodes: u32 = T::MaxWellKnownNodes::get(); + + /// The maximum length in bytes of PeerId + const MaxPeerIdLength: u32 = T::MaxPeerIdLength::get(); + + type Error = Error; + + fn deposit_event() = default; + + /// Add a node to the set of well known nodes. If the node is already claimed, the owner + /// will be updated and keep the existing additional connection unchanged. + /// + /// May only be called from `T::AddOrigin`. + /// + /// - `node`: identifier of the node. + #[weight = (T::WeightInfo::add_well_known_node(), DispatchClass::Operational)] + pub fn add_well_known_node(origin, node: PeerId, owner: T::AccountId) { + T::AddOrigin::ensure_origin(origin)?; + ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); + + let mut nodes = WellKnownNodes::get(); + ensure!(nodes.len() < T::MaxWellKnownNodes::get() as usize, Error::::TooManyNodes); + ensure!(!nodes.contains(&node), Error::::AlreadyJoined); + + nodes.insert(node.clone()); + + WellKnownNodes::put(&nodes); + >::insert(&node, &owner); + + Self::deposit_event(RawEvent::NodeAdded(node, owner)); + } + + /// Remove a node from the set of well known nodes. The ownership and additional + /// connections of the node will also be removed. + /// + /// May only be called from `T::RemoveOrigin`. + /// + /// - `node`: identifier of the node. 
+ #[weight = (T::WeightInfo::remove_well_known_node(), DispatchClass::Operational)] + pub fn remove_well_known_node(origin, node: PeerId) { + T::RemoveOrigin::ensure_origin(origin)?; + ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); + + let mut nodes = WellKnownNodes::get(); + ensure!(nodes.contains(&node), Error::::NotExist); + + nodes.remove(&node); + + WellKnownNodes::put(&nodes); + >::remove(&node); + AdditionalConnections::remove(&node); + + Self::deposit_event(RawEvent::NodeRemoved(node)); + } + + /// Swap a well known node to another. Both the ownership and additional connections + /// stay untouched. + /// + /// May only be called from `T::SwapOrigin`. + /// + /// - `remove`: the node which will be moved out from the list. + /// - `add`: the node which will be put in the list. + #[weight = (T::WeightInfo::swap_well_known_node(), DispatchClass::Operational)] + pub fn swap_well_known_node(origin, remove: PeerId, add: PeerId) { + T::SwapOrigin::ensure_origin(origin)?; + ensure!(remove.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); + ensure!(add.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); + + if remove == add { return Ok(()) } + + let mut nodes = WellKnownNodes::get(); + ensure!(nodes.contains(&remove), Error::::NotExist); + ensure!(!nodes.contains(&add), Error::::AlreadyJoined); + + nodes.remove(&remove); + nodes.insert(add.clone()); + + WellKnownNodes::put(&nodes); + Owners::::swap(&remove, &add); + AdditionalConnections::swap(&remove, &add); + + Self::deposit_event(RawEvent::NodeSwapped(remove, add)); + } + + /// Reset all the well known nodes. This will not remove the ownership and additional + /// connections for the removed nodes. The node owner can perform further cleaning if + /// they decide to leave the network. + /// + /// May only be called from `T::ResetOrigin`. + /// + /// - `nodes`: the new nodes for the allow list. + #[weight = (T::WeightInfo::reset_well_known_nodes(), DispatchClass::Operational)] + pub fn reset_well_known_nodes(origin, nodes: Vec<(PeerId, T::AccountId)>) { + T::ResetOrigin::ensure_origin(origin)?; + ensure!(nodes.len() < T::MaxWellKnownNodes::get() as usize, Error::::TooManyNodes); + + Self::initialize_nodes(&nodes); + + Self::deposit_event(RawEvent::NodesReset(nodes)); + } + + /// A given node can be claimed by anyone. The owner should be the first to know its + /// PeerId, so claim it right away! + /// + /// - `node`: identifier of the node. + #[weight = T::WeightInfo::claim_node()] + pub fn claim_node(origin, node: PeerId) { + let sender = ensure_signed(origin)?; + + ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); + ensure!(!Owners::::contains_key(&node),Error::::AlreadyClaimed); + + Owners::::insert(&node, &sender); + Self::deposit_event(RawEvent::NodeClaimed(node, sender)); + } + + /// A claim can be removed by its owner and get back the reservation. The additional + /// connections are also removed. You can't remove a claim on well known nodes, as it + /// needs to reach consensus among the network participants. + /// + /// - `node`: identifier of the node. 
+ #[weight = T::WeightInfo::remove_claim()] + pub fn remove_claim(origin, node: PeerId) { + let sender = ensure_signed(origin)?; + + ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::<T>::PeerIdTooLong); + ensure!(Owners::<T>::contains_key(&node), Error::<T>::NotClaimed); + ensure!(Owners::<T>::get(&node) == sender, Error::<T>::NotOwner); + ensure!(!WellKnownNodes::get().contains(&node), Error::<T>::PermissionDenied); + + Owners::<T>::remove(&node); + AdditionalConnections::remove(&node); + + Self::deposit_event(RawEvent::ClaimRemoved(node, sender)); + } + + /// A node can be transferred to a new owner. + /// + /// - `node`: identifier of the node. + /// - `owner`: new owner of the node. + #[weight = T::WeightInfo::transfer_node()] + pub fn transfer_node(origin, node: PeerId, owner: T::AccountId) { + let sender = ensure_signed(origin)?; + + ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::<T>::PeerIdTooLong); + ensure!(Owners::<T>::contains_key(&node), Error::<T>::NotClaimed); + ensure!(Owners::<T>::get(&node) == sender, Error::<T>::NotOwner); + + Owners::<T>::insert(&node, &owner); + + Self::deposit_event(RawEvent::NodeTransferred(node, owner)); + } + + /// Add additional connections to a given node. + /// + /// - `node`: identifier of the node. + /// - `connections`: additional nodes from which the connections are allowed. + #[weight = T::WeightInfo::add_connections()] + pub fn add_connections( + origin, + node: PeerId, + connections: Vec<PeerId> + ) { + let sender = ensure_signed(origin)?; + + ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::<T>::PeerIdTooLong); + ensure!(Owners::<T>::contains_key(&node), Error::<T>::NotClaimed); + ensure!(Owners::<T>::get(&node) == sender, Error::<T>::NotOwner); + + let mut nodes = AdditionalConnections::get(&node); + + for add_node in connections.iter() { + if *add_node == node { + continue; + } + nodes.insert(add_node.clone()); + } + + AdditionalConnections::insert(&node, nodes); + + Self::deposit_event(RawEvent::ConnectionsAdded(node, connections)); + } + + /// Remove additional connections from a given node. + /// + /// - `node`: identifier of the node. + /// - `connections`: additional nodes from which the connections are not allowed anymore. + #[weight = T::WeightInfo::remove_connections()] + pub fn remove_connections( + origin, + node: PeerId, + connections: Vec<PeerId> + ) { + let sender = ensure_signed(origin)?; + + ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::<T>::PeerIdTooLong); + ensure!(Owners::<T>::contains_key(&node), Error::<T>::NotClaimed); + ensure!(Owners::<T>::get(&node) == sender, Error::<T>::NotOwner); + + let mut nodes = AdditionalConnections::get(&node); + + for remove_node in connections.iter() { + nodes.remove(remove_node); + } + + AdditionalConnections::insert(&node, nodes); + + Self::deposit_event(RawEvent::ConnectionsRemoved(node, connections)); + } + + /// Set reserved nodes every block. It may not be enabled, depending on the offchain + /// worker settings when starting the node. 
+ fn offchain_worker(now: T::BlockNumber) { + let network_state = sp_io::offchain::network_state(); + match network_state { + Err(_) => debug::error!("Error: failed to get network state of node at {:?}", now), + Ok(state) => { + let encoded_peer = state.peer_id.0; + match Decode::decode(&mut &encoded_peer[..]) { + Err(_) => debug::error!("Error: failed to decode PeerId at {:?}", now), + Ok(node) => sp_io::offchain::set_authorized_nodes( + Self::get_authorized_nodes(&PeerId(node)), + true + ) + } + } + } + } + } +} + +impl Module { + fn initialize_nodes(nodes: &Vec<(PeerId, T::AccountId)>) { + let peer_ids = nodes.iter() + .map(|item| item.0.clone()) + .collect::>(); + WellKnownNodes::put(&peer_ids); + + for (node, who) in nodes.iter() { + Owners::::insert(node, who); + } + } + + fn get_authorized_nodes(node: &PeerId) -> Vec { + let mut nodes = AdditionalConnections::get(node); + + let mut well_known_nodes = WellKnownNodes::get(); + if well_known_nodes.contains(node) { + well_known_nodes.remove(node); + nodes.extend(well_known_nodes); + } + + Vec::from_iter(nodes) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use frame_support::{ + assert_ok, assert_noop, impl_outer_origin, weights::Weight, + parameter_types, ord_parameter_types, + }; + use frame_system::EnsureSignedBy; + use sp_core::H256; + use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, testing::Header}; + + impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} + } + + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + impl frame_system::Trait for Test { + type BaseCallFilter = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = (); + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type BlockExecutionWeight = (); + type ExtrinsicBaseWeight = (); + type MaximumExtrinsicWeight = MaximumBlockWeight; + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type PalletInfo = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + } + + ord_parameter_types! { + pub const One: u64 = 1; + pub const Two: u64 = 2; + pub const Three: u64 = 3; + pub const Four: u64 = 4; + } + parameter_types! 
{ + pub const MaxWellKnownNodes: u32 = 4; + pub const MaxPeerIdLength: u32 = 2; + } + impl Trait for Test { + type Event = (); + type MaxWellKnownNodes = MaxWellKnownNodes; + type MaxPeerIdLength = MaxPeerIdLength; + type AddOrigin = EnsureSignedBy; + type RemoveOrigin = EnsureSignedBy; + type SwapOrigin = EnsureSignedBy; + type ResetOrigin = EnsureSignedBy; + type WeightInfo = (); + } + + type NodeAuthorization = Module; + + fn test_node(id: u8) -> PeerId { + PeerId(vec![id]) + } + + fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + GenesisConfig:: { + nodes: vec![(test_node(10), 10), (test_node(20), 20), (test_node(30), 30)], + }.assimilate_storage(&mut t).unwrap(); + t.into() + } + + #[test] + fn add_well_known_node_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::add_well_known_node(Origin::signed(2), test_node(15), 15), + BadOrigin + ); + assert_noop!( + NodeAuthorization::add_well_known_node(Origin::signed(1), PeerId(vec![1, 2, 3]), 15), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(20), 20), + Error::::AlreadyJoined + ); + + assert_ok!( + NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(15), 15) + ); + assert_eq!( + WellKnownNodes::get(), + BTreeSet::from_iter(vec![test_node(10), test_node(15), test_node(20), test_node(30)]) + ); + assert_eq!(Owners::::get(test_node(10)), 10); + assert_eq!(Owners::::get(test_node(20)), 20); + assert_eq!(Owners::::get(test_node(30)), 30); + assert_eq!(Owners::::get(test_node(15)), 15); + + assert_noop!( + NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(25), 25), + Error::::TooManyNodes + ); + }); + } + + #[test] + fn remove_well_known_node_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::remove_well_known_node(Origin::signed(3), test_node(20)), + BadOrigin + ); + assert_noop!( + NodeAuthorization::remove_well_known_node(Origin::signed(2), PeerId(vec![1, 2, 3])), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(40)), + Error::::NotExist + ); + + AdditionalConnections::insert( + test_node(20), + BTreeSet::from_iter(vec![test_node(40)]) + ); + assert!(AdditionalConnections::contains_key(test_node(20))); + + assert_ok!( + NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(20)) + ); + assert_eq!( + WellKnownNodes::get(), + BTreeSet::from_iter(vec![test_node(10), test_node(30)]) + ); + assert!(!Owners::::contains_key(test_node(20))); + assert!(!AdditionalConnections::contains_key(test_node(20))); + }); + } + + #[test] + fn swap_well_known_node_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::swap_well_known_node( + Origin::signed(4), test_node(20), test_node(5) + ), + BadOrigin + ); + assert_noop!( + NodeAuthorization::swap_well_known_node( + Origin::signed(3), PeerId(vec![1, 2, 3]), test_node(20) + ), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::swap_well_known_node( + Origin::signed(3), test_node(20), PeerId(vec![1, 2, 3]) + ), + Error::::PeerIdTooLong + ); + + assert_ok!( + NodeAuthorization::swap_well_known_node( + Origin::signed(3), test_node(20), test_node(20) + ) + ); + assert_eq!( + WellKnownNodes::get(), + BTreeSet::from_iter(vec![test_node(10), test_node(20), test_node(30)]) + ); + + assert_noop!( + NodeAuthorization::swap_well_known_node( + 
Origin::signed(3), test_node(15), test_node(5) + ), + Error::::NotExist + ); + assert_noop!( + NodeAuthorization::swap_well_known_node( + Origin::signed(3), test_node(20), test_node(30) + ), + Error::::AlreadyJoined + ); + + AdditionalConnections::insert( + test_node(20), + BTreeSet::from_iter(vec![test_node(15)]) + ); + assert_ok!( + NodeAuthorization::swap_well_known_node( + Origin::signed(3), test_node(20), test_node(5) + ) + ); + assert_eq!( + WellKnownNodes::get(), + BTreeSet::from_iter(vec![test_node(5), test_node(10), test_node(30)]) + ); + assert!(!Owners::::contains_key(test_node(20))); + assert_eq!(Owners::::get(test_node(5)), 20); + assert!(!AdditionalConnections::contains_key(test_node(20))); + assert_eq!( + AdditionalConnections::get(test_node(5)), + BTreeSet::from_iter(vec![test_node(15)]) + ); + }); + } + + #[test] + fn reset_well_known_nodes_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::reset_well_known_nodes( + Origin::signed(3), + vec![(test_node(15), 15), (test_node(5), 5), (test_node(20), 20)] + ), + BadOrigin + ); + assert_noop!( + NodeAuthorization::reset_well_known_nodes( + Origin::signed(4), + vec![ + (test_node(15), 15), + (test_node(5), 5), + (test_node(20), 20), + (test_node(25), 25), + ] + ), + Error::::TooManyNodes + ); + + assert_ok!( + NodeAuthorization::reset_well_known_nodes( + Origin::signed(4), + vec![(test_node(15), 15), (test_node(5), 5), (test_node(20), 20)] + ) + ); + assert_eq!( + WellKnownNodes::get(), + BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(20)]) + ); + assert_eq!(Owners::::get(test_node(5)), 5); + assert_eq!(Owners::::get(test_node(15)), 15); + assert_eq!(Owners::::get(test_node(20)), 20); + }); + } + + #[test] + fn claim_node_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::claim_node(Origin::signed(1), PeerId(vec![1, 2, 3])), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::claim_node(Origin::signed(1), test_node(20)), + Error::::AlreadyClaimed + ); + + assert_ok!(NodeAuthorization::claim_node(Origin::signed(15), test_node(15))); + assert_eq!(Owners::::get(test_node(15)), 15); + }); + } + + #[test] + fn remove_claim_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::remove_claim(Origin::signed(15), PeerId(vec![1, 2, 3])), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::remove_claim(Origin::signed(15), test_node(15)), + Error::::NotClaimed + ); + + assert_noop!( + NodeAuthorization::remove_claim(Origin::signed(15), test_node(20)), + Error::::NotOwner + ); + + assert_noop!( + NodeAuthorization::remove_claim(Origin::signed(20), test_node(20)), + Error::::PermissionDenied + ); + + Owners::::insert(test_node(15), 15); + AdditionalConnections::insert( + test_node(15), + BTreeSet::from_iter(vec![test_node(20)]) + ); + assert_ok!(NodeAuthorization::remove_claim(Origin::signed(15), test_node(15))); + assert!(!Owners::::contains_key(test_node(15))); + assert!(!AdditionalConnections::contains_key(test_node(15))); + }); + } + + #[test] + fn transfer_node_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::transfer_node(Origin::signed(15), PeerId(vec![1, 2, 3]), 10), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::transfer_node(Origin::signed(15), test_node(15), 10), + Error::::NotClaimed + ); + + assert_noop!( + NodeAuthorization::transfer_node(Origin::signed(15), test_node(20), 10), + Error::::NotOwner + ); + + 
assert_ok!(NodeAuthorization::transfer_node(Origin::signed(20), test_node(20), 15)); + assert_eq!(Owners::::get(test_node(20)), 15); + }); + } + + #[test] + fn add_connections_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::add_connections( + Origin::signed(15), PeerId(vec![1, 2, 3]), vec![test_node(5)] + ), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::add_connections( + Origin::signed(15), test_node(15), vec![test_node(5)] + ), + Error::::NotClaimed + ); + + assert_noop!( + NodeAuthorization::add_connections( + Origin::signed(15), test_node(20), vec![test_node(5)] + ), + Error::::NotOwner + ); + + assert_ok!( + NodeAuthorization::add_connections( + Origin::signed(20), + test_node(20), + vec![test_node(15), test_node(5), test_node(25), test_node(20)] + ) + ); + assert_eq!( + AdditionalConnections::get(test_node(20)), + BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]) + ); + }); + } + + #[test] + fn remove_connections_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::remove_connections( + Origin::signed(15), PeerId(vec![1, 2, 3]), vec![test_node(5)] + ), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::remove_connections( + Origin::signed(15), test_node(15), vec![test_node(5)] + ), + Error::::NotClaimed + ); + + assert_noop!( + NodeAuthorization::remove_connections( + Origin::signed(15), test_node(20), vec![test_node(5)] + ), + Error::::NotOwner + ); + + AdditionalConnections::insert( + test_node(20), + BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]) + ); + assert_ok!( + NodeAuthorization::remove_connections( + Origin::signed(20), + test_node(20), + vec![test_node(15), test_node(5)] + ) + ); + assert_eq!( + AdditionalConnections::get(test_node(20)), + BTreeSet::from_iter(vec![test_node(25)]) + ); + }); + } + + #[test] + fn get_authorized_nodes_works() { + new_test_ext().execute_with(|| { + AdditionalConnections::insert( + test_node(20), + BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]) + ); + + let mut authorized_nodes = Module::::get_authorized_nodes(&test_node(20)); + authorized_nodes.sort(); + assert_eq!( + authorized_nodes, + vec![test_node(5), test_node(10), test_node(15), test_node(25), test_node(30)] + ); + }); + } +} diff --git a/frame/offences/Cargo.toml b/frame/offences/Cargo.toml index db0c847e9a143adaed09e52097e4d6f6b4620dae..c5c8881007c228280f458cc201800516ccf31f1e 100644 --- a/frame/offences/Cargo.toml +++ b/frame/offences/Cargo.toml @@ -1,29 +1,30 @@ [package] name = "pallet-offences" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME offences pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../balances" } +pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0-rc5", default-features = false, 
path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sp-io = { version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/offences/README.md b/frame/offences/README.md new file mode 100644 index 0000000000000000000000000000000000000000..454c7effaf36cc599516feb5d047606415ee68d5 --- /dev/null +++ b/frame/offences/README.md @@ -0,0 +1,5 @@ +# Offences Module + +Tracks reported offences + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/offences/benchmarking/Cargo.toml b/frame/offences/benchmarking/Cargo.toml index 12d4882e60bd0242f953a66ce3347d5600fee2a4..7a95cebc4fb215a5e36e80aa99d7f68ed659cfb6 100644 --- a/frame/offences/benchmarking/Cargo.toml +++ b/frame/offences/benchmarking/Cargo.toml @@ -1,38 +1,39 @@ [package] name = "pallet-offences-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME offences pallet benchmarking" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../../benchmarking" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../system" } -pallet-babe = { version = "2.0.0-rc5", default-features = false, path = "../../babe" } -pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../../balances" } -pallet-grandpa = { version = "2.0.0-rc5", default-features = false, path = "../../grandpa" } -pallet-im-online = { version = "2.0.0-rc5", default-features = false, path = "../../im-online" } -pallet-offences = { version = "2.0.0-rc5", default-features = false, features = ["runtime-benchmarks"], path = "../../offences" } -pallet-session = { version = "2.0.0-rc5", default-features = false, path = "../../session" } -pallet-staking = { version = "2.0.0-rc5", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/staking" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../benchmarking" } +frame-support = { 
version = "2.0.0", default-features = false, path = "../../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../../system" } +pallet-babe = { version = "2.0.0", default-features = false, path = "../../babe" } +pallet-balances = { version = "2.0.0", default-features = false, path = "../../balances" } +pallet-grandpa = { version = "2.0.0", default-features = false, path = "../../grandpa" } +pallet-im-online = { version = "2.0.0", default-features = false, path = "../../im-online" } +pallet-offences = { version = "2.0.0", default-features = false, features = ["runtime-benchmarks"], path = "../../offences" } +pallet-session = { version = "2.0.0", default-features = false, path = "../../session" } +pallet-staking = { version = "2.0.0", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-staking = { version = "2.0.0", default-features = false, path = "../../../primitives/staking" } +sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } [dev-dependencies] -pallet-staking-reward-curve = { version = "2.0.0-rc5", path = "../../staking/reward-curve" } -pallet-timestamp = { version = "2.0.0-rc5", path = "../../timestamp" } +pallet-staking-reward-curve = { version = "2.0.0", path = "../../staking/reward-curve" } +pallet-timestamp = { version = "2.0.0", path = "../../timestamp" } serde = { version = "1.0.101" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-io = { version = "2.0.0", path = "../../../primitives/io" } [features] default = ["std"] diff --git a/frame/offences/benchmarking/README.md b/frame/offences/benchmarking/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cbfe91d73a6a7cc5118f4e56a806f4c766c426d1 --- /dev/null +++ b/frame/offences/benchmarking/README.md @@ -0,0 +1,3 @@ +Offences pallet benchmarking. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index 1aa9fed85b1270e8a6adac8d235adfaec8fa2121..e35050992368ac4454cf295a15521bcd51c3045d 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -125,7 +125,7 @@ fn create_offender(n: u32, nominators: u32) -> Result, &'s RawOrigin::Signed(nominator_stash.clone()).into(), nominator_controller_lookup.clone(), amount.clone(), - reward_destination, + reward_destination.clone(), )?; let selected_validators: Vec> = vec![controller_lookup.clone()]; diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index ad6e8a14d5622f0fe1922fc31e5801262ab10323..c247e9a3779b4c35f912688ea0e3ba49b393fe5b 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -59,7 +59,7 @@ impl frame_system::Trait for Test { type AvailableBlockRatio = (); type MaximumBlockLength = (); type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (Balances,); @@ -72,6 +72,7 @@ parameter_types! 
{ pub const ExistentialDeposit: Balance = 10; } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = Balance; type Event = Event; type DustRemoval = (); @@ -203,7 +204,6 @@ impl pallet_offences::Trait for Test { type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; - type WeightInfo = (); } impl frame_system::offchain::SendTransactionTypes for Test where Call: From { diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index 9a067d903fe2dba46cd51d54e6ad98adfab9afb1..bec1981301219545770bf2cdbe8b65c500f7f70e 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -77,8 +77,6 @@ pub trait Trait: frame_system::Trait { /// `on_initialize`. /// Note it's going to be exceeded before we stop adding to it, so it has to be set conservatively. type WeightSoftLimit: Get; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; } decl_storage! { @@ -111,8 +109,8 @@ decl_event!( pub enum Event { /// There is an offence reported of the given `kind` happened at the `session_index` and /// (kind-specific) time slot. This event is not deposited for duplicate slashes. last - /// element indicates of the offence was applied (true) or queued (false) - /// [kind, timeslot, applied]. + /// element indicates of the offence was applied (true) or queued (false) + /// \[kind, timeslot, applied\]. Offence(Kind, OpaqueTimeSlot, bool), } ); diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index f981e70835c0e14acf0a7b03ac4bd1dcd9b30c77..58ee97a9bcbb51465a8d464a4a0b12a2ce38c5b0 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -116,7 +116,7 @@ impl frame_system::Trait for Runtime { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); @@ -132,7 +132,6 @@ impl Trait for Runtime { type IdentificationTuple = u64; type OnOffenceHandler = OnOffenceHandler; type WeightSoftLimit = OffencesWeightSoftLimit; - type WeightInfo = (); } mod offences { diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index 155a1395420067d9476ac64c345b9eb32180bf4a..1c32edf162c69e68b099724c9d620997549b3e32 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-proxy" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME proxying pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,19 +15,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = 
"2.0.0-rc5", default-features = false, path = "../../primitives/std" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -pallet-utility = { version = "2.0.0-rc5", path = "../utility" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-utility = { version = "2.0.0", path = "../utility" } [features] default = ["std"] diff --git a/frame/proxy/README.md b/frame/proxy/README.md new file mode 100644 index 0000000000000000000000000000000000000000..26969db638289fbd7bdc3171460cb01e7f7bf850 --- /dev/null +++ b/frame/proxy/README.md @@ -0,0 +1,21 @@ +# Proxy Module +A module allowing accounts to give permission to other accounts to dispatch types of calls from +their signed origin. + +The accounts to which permission is delegated may be requied to announce the action that they +wish to execute some duration prior to execution happens. In this case, the target account may +reject the announcement and in doing so, veto the execution. + +- [`proxy::Trait`](https://docs.rs/pallet-proxy/latest/pallet_proxy/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-proxy/latest/pallet_proxy/enum.Call.html) + +## Overview + +## Interface + +### Dispatchable Functions + +[`Call`]: ./enum.Call.html +[`Trait`]: ./trait.Trait.html + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index 3cbe517dfd7da782cb69f0079037d035c1c09680..5f1d79741dd8e02cec922401b2be94c128a3eb61 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -20,21 +20,60 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; +use frame_system::{RawOrigin, EventRecord}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use sp_runtime::traits::Bounded; use crate::Module as Proxy; const SEED: u32 = 0; +fn assert_last_event(generic_event: ::Event) { + let events = frame_system::Module::::events(); + let system_event: ::Event = generic_event.into(); + // compare to the last event record + let EventRecord { event, .. 
} = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + fn add_proxies(n: u32, maybe_who: Option) -> Result<(), &'static str> { - let caller = maybe_who.unwrap_or_else(|| account("caller", 0, SEED)); + let caller = maybe_who.unwrap_or_else(|| whitelisted_caller()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); for i in 0..n { Proxy::::add_proxy( RawOrigin::Signed(caller.clone()).into(), account("target", i, SEED), - T::ProxyType::default() + T::ProxyType::default(), + T::BlockNumber::zero(), + )?; + } + Ok(()) +} + +fn add_announcements( + n: u32, + maybe_who: Option, + maybe_real: Option +) -> Result<(), &'static str> { + let caller = maybe_who.unwrap_or_else(|| account("caller", 0, SEED)); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let real = if let Some(real) = maybe_real { + real + } else { + let real = account("real", 0, SEED); + T::Currency::make_free_balance_be(&real, BalanceOf::::max_value()); + Proxy::::add_proxy( + RawOrigin::Signed(real.clone()).into(), + caller.clone(), + T::ProxyType::default(), + T::BlockNumber::zero(), + )?; + real + }; + for _ in 0..n { + Proxy::::announce( + RawOrigin::Signed(caller.clone()).into(), + real.clone(), + T::CallHasher::hash_of(&("add_announcement", n)), )?; } Ok(()) @@ -49,43 +88,171 @@ benchmarks! { let p in ...; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // ... and "real" is the traditional caller. This is not a typo. - let real: T::AccountId = account("caller", 0, SEED); + let real: T::AccountId = whitelisted_caller(); let call: ::Call = frame_system::Call::::remark(vec![]).into(); }: _(RawOrigin::Signed(caller), real, Some(T::ProxyType::default()), Box::new(call)) + verify { + assert_last_event::(RawEvent::ProxyExecuted(Ok(())).into()) + } + + proxy_announced { + let a in 0 .. T::MaxPending::get() - 1; + let p in ...; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("anonymous", 0, SEED); + let delegate: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&delegate, BalanceOf::::max_value()); + // ... and "real" is the traditional caller. This is not a typo. + let real: T::AccountId = whitelisted_caller(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); + Proxy::::announce( + RawOrigin::Signed(delegate.clone()).into(), + real.clone(), + T::CallHasher::hash_of(&call), + )?; + add_announcements::(a, Some(delegate.clone()), None)?; + }: _(RawOrigin::Signed(caller), delegate, real, Some(T::ProxyType::default()), Box::new(call)) + verify { + assert_last_event::(RawEvent::ProxyExecuted(Ok(())).into()) + } + + remove_announcement { + let a in 0 .. T::MaxPending::get() - 1; + let p in ...; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + // ... and "real" is the traditional caller. This is not a typo. 
+ let real: T::AccountId = whitelisted_caller(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); + Proxy::::announce( + RawOrigin::Signed(caller.clone()).into(), + real.clone(), + T::CallHasher::hash_of(&call), + )?; + add_announcements::(a, Some(caller.clone()), None)?; + }: _(RawOrigin::Signed(caller.clone()), real, T::CallHasher::hash_of(&call)) + verify { + let (announcements, _) = Announcements::::get(&caller); + assert_eq!(announcements.len() as u32, a); + } + + reject_announcement { + let a in 0 .. T::MaxPending::get() - 1; + let p in ...; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + // ... and "real" is the traditional caller. This is not a typo. + let real: T::AccountId = whitelisted_caller(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); + Proxy::::announce( + RawOrigin::Signed(caller.clone()).into(), + real.clone(), + T::CallHasher::hash_of(&call), + )?; + add_announcements::(a, Some(caller.clone()), None)?; + }: _(RawOrigin::Signed(real), caller.clone(), T::CallHasher::hash_of(&call)) + verify { + let (announcements, _) = Announcements::::get(&caller); + assert_eq!(announcements.len() as u32, a); + } + + announce { + let a in 0 .. T::MaxPending::get() - 1; + let p in ...; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + // ... and "real" is the traditional caller. This is not a typo. + let real: T::AccountId = whitelisted_caller(); + add_announcements::(a, Some(caller.clone()), None)?; + let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call_hash = T::CallHasher::hash_of(&call); + }: _(RawOrigin::Signed(caller.clone()), real.clone(), call_hash) + verify { + assert_last_event::(RawEvent::Announced(real, caller, call_hash).into()); + } add_proxy { let p in ...; - let caller: T::AccountId = account("caller", 0, SEED); - }: _(RawOrigin::Signed(caller), account("target", T::MaxProxies::get().into(), SEED), T::ProxyType::default()) + let caller: T::AccountId = whitelisted_caller(); + }: _( + RawOrigin::Signed(caller.clone()), + account("target", T::MaxProxies::get().into(), SEED), + T::ProxyType::default(), + T::BlockNumber::zero() + ) + verify { + let (proxies, _) = Proxies::::get(caller); + assert_eq!(proxies.len() as u32, p + 1); + } remove_proxy { let p in ...; - let caller: T::AccountId = account("caller", 0, SEED); - }: _(RawOrigin::Signed(caller), account("target", 0, SEED), T::ProxyType::default()) + let caller: T::AccountId = whitelisted_caller(); + }: _( + RawOrigin::Signed(caller.clone()), + account("target", 0, SEED), + T::ProxyType::default(), + T::BlockNumber::zero() + ) + verify { + let (proxies, _) = Proxies::::get(caller); + assert_eq!(proxies.len() as u32, p - 1); + } remove_proxies { let p in ...; - let caller: T::AccountId = account("caller", 0, SEED); - }: _(RawOrigin::Signed(caller)) + let caller: T::AccountId = whitelisted_caller(); + }: _(RawOrigin::Signed(caller.clone())) + verify { + let (proxies, _) = Proxies::::get(caller); + assert_eq!(proxies.len() as u32, 0); + } anonymous { let p in ...; - }: _(RawOrigin::Signed(account("caller", 0, SEED)), T::ProxyType::default(), 0) + let caller: T::AccountId = whitelisted_caller(); + }: _( + RawOrigin::Signed(caller.clone()), + T::ProxyType::default(), + T::BlockNumber::zero(), + 0 + ) 
+ verify { + let anon_account = Module::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); + assert_last_event::(RawEvent::AnonymousCreated( + anon_account, + caller, + T::ProxyType::default(), + 0, + ).into()); + } kill_anonymous { let p in 0 .. (T::MaxProxies::get() - 2).into(); - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - Module::::anonymous(RawOrigin::Signed(account("caller", 0, SEED)).into(), T::ProxyType::default(), 0)?; + Module::::anonymous( + RawOrigin::Signed(whitelisted_caller()).into(), + T::ProxyType::default(), + T::BlockNumber::zero(), + 0 + )?; let height = system::Module::::block_number(); let ext_index = system::Module::::extrinsic_index().unwrap_or(0); let anon = Module::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); add_proxies::(p, Some(anon.clone()))?; - - }: _(RawOrigin::Signed(anon), caller, T::ProxyType::default(), 0, height, ext_index) + ensure!(Proxies::::contains_key(&anon), "anon proxy not created"); + }: _(RawOrigin::Signed(anon.clone()), caller.clone(), T::ProxyType::default(), 0, height, ext_index) + verify { + assert!(!Proxies::::contains_key(&anon)); + } } #[cfg(test)] @@ -98,6 +265,10 @@ mod tests { fn test_benchmarks() { new_test_ext().execute_with(|| { assert_ok!(test_benchmark_proxy::()); + assert_ok!(test_benchmark_proxy_announced::()); + assert_ok!(test_benchmark_remove_announcement::()); + assert_ok!(test_benchmark_reject_announcement::()); + assert_ok!(test_benchmark_announce::()); assert_ok!(test_benchmark_add_proxy::()); assert_ok!(test_benchmark_remove_proxy::()); assert_ok!(test_benchmark_remove_proxies::()); diff --git a/frame/proxy/src/default_weight.rs b/frame/proxy/src/default_weight.rs new file mode 100644 index 0000000000000000000000000000000000000000..183c0b81c8a07e570edf914ba65b2456dba43e78 --- /dev/null +++ b/frame/proxy/src/default_weight.rs @@ -0,0 +1,84 @@ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn proxy(p: u32, ) -> Weight { + (26127000 as Weight) + .saturating_add((214000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + } + fn proxy_announced(a: u32, p: u32, ) -> Weight { + (55405000 as Weight) + .saturating_add((774000 as Weight).saturating_mul(a as Weight)) + .saturating_add((209000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn remove_announcement(a: u32, p: u32, ) -> Weight { + (35879000 as Weight) + .saturating_add((783000 as Weight).saturating_mul(a as Weight)) + .saturating_add((20000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn reject_announcement(a: u32, p: u32, ) -> Weight { + (36097000 as Weight) + .saturating_add((780000 as Weight).saturating_mul(a as Weight)) + .saturating_add((12000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn announce(a: u32, p: u32, ) -> Weight { + (53769000 as Weight) + .saturating_add((675000 as Weight).saturating_mul(a as Weight)) + .saturating_add((214000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn add_proxy(p: u32, ) -> Weight { + (36082000 as Weight) + .saturating_add((234000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_proxy(p: u32, ) -> Weight { + (32885000 as Weight) + .saturating_add((267000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_proxies(p: u32, ) -> Weight { + (31735000 as Weight) + .saturating_add((215000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn anonymous(p: u32, ) -> Weight { + (50907000 as Weight) + .saturating_add((61000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn kill_anonymous(p: u32, ) -> Weight { + (33926000 as Weight) + .saturating_add((208000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } +} diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index ec43007827667a5c5516f81a096961a84d49d5dd..4746a4ab67c1764223701820acde3ff6116c50fc 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -19,6 +19,10 @@ //! A module allowing accounts to give permission to other accounts to dispatch types of calls from //! their signed origin. //! +//! The accounts to which permission is delegated may be required to announce the action that they +//! wish to execute some duration prior to execution. In this case, the target account may +//! reject the announcement and in doing so, veto the execution. +//! +//! - [`proxy::Trait`](./trait.Trait.html) //!
- [`Call`](./enum.Call.html) //! @@ -37,23 +41,27 @@ use sp_std::prelude::*; use codec::{Encode, Decode}; use sp_io::hashing::blake2_256; -use sp_runtime::{DispatchResult, traits::{Dispatchable, Zero}}; -use sp_runtime::traits::Member; +use sp_runtime::{DispatchResult, traits::{Dispatchable, Zero, Hash, Member, Saturating}}; use frame_support::{ - decl_module, decl_event, decl_error, decl_storage, Parameter, ensure, traits::{ - Get, ReservableCurrency, Currency, InstanceFilter, - OriginTrait, IsType, - }, weights::{Weight, GetDispatchInfo, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}}, - dispatch::{PostDispatchInfo, IsSubType}, + decl_module, decl_event, decl_error, decl_storage, Parameter, ensure, RuntimeDebug, traits::{ + Get, ReservableCurrency, Currency, InstanceFilter, OriginTrait, IsType, + }, weights::{Weight, GetDispatchInfo}, + dispatch::{PostDispatchInfo, IsSubType}, storage::IterableStorageMap, }; use frame_system::{self as system, ensure_signed}; +use frame_support::dispatch::DispatchError; mod tests; mod benchmarking; +mod default_weight; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; pub trait WeightInfo { + fn proxy_announced(a: u32, p: u32, ) -> Weight; + fn remove_announcement(a: u32, p: u32, ) -> Weight; + fn reject_announcement(a: u32, p: u32, ) -> Weight; + fn announce(a: u32, p: u32, ) -> Weight; fn proxy(p: u32, ) -> Weight; fn add_proxy(p: u32, ) -> Weight; fn remove_proxy(p: u32, ) -> Weight; @@ -62,15 +70,6 @@ pub trait WeightInfo { fn kill_anonymous(p: u32, ) -> Weight; } -impl WeightInfo for () { - fn proxy(_p: u32, ) -> Weight { 1_000_000_000 } - fn add_proxy(_p: u32, ) -> Weight { 1_000_000_000 } - fn remove_proxy(_p: u32, ) -> Weight { 1_000_000_000 } - fn remove_proxies(_p: u32, ) -> Weight { 1_000_000_000 } - fn anonymous(_p: u32, ) -> Weight { 1_000_000_000 } - fn kill_anonymous(_p: u32, ) -> Weight { 1_000_000_000 } -} - /// Configuration trait. pub trait Trait: frame_system::Trait { /// The overarching event type. @@ -108,19 +107,67 @@ pub trait Trait: frame_system::Trait { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + + /// The maximum amount of time-delayed announcements that are allowed to be pending. + type MaxPending: Get; + + /// The type of hash used for hashing the call. + type CallHasher: Hash; + + /// The base amount of currency needed to reserve for creating an announcement. + /// + /// This is held when a new storage item holding a `Balance` is created (typically 16 bytes). + type AnnouncementDepositBase: Get>; + + /// The amount of currency needed per announcement made. + /// + /// This is held for adding an `AccountId`, `Hash` and `BlockNumber` (typically 68 bytes) + /// into a pre-existing storage value. + type AnnouncementDepositFactor: Get>; +} + +/// The parameters under which a particular account has a proxy relationship with some other +/// account. +#[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug)] +pub struct ProxyDefinition { + /// The account which may act on behalf of another. + delegate: AccountId, + /// A value defining the subset of calls that it is allowed to make. + proxy_type: ProxyType, + /// The number of blocks that an announcement must be in place for before the corresponding call + /// may be dispatched. If zero, then no announcement is needed. + delay: BlockNumber, +} + +/// Details surrounding a specific instance of an announcement to make a call. 
+#[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, RuntimeDebug)] +pub struct Announcement { + /// The account which made the announcement. + real: AccountId, + /// The hash of the call to be made. + call_hash: Hash, + /// The height at which the announcement was made. + height: BlockNumber, } +type CallHashOf = <::CallHasher as Hash>::Output; + decl_storage! { trait Store for Module as Proxy { /// The set of account proxies. Maps the account which has delegated to the accounts /// which are being delegated to, together with the amount held on deposit. - pub Proxies: map hasher(twox_64_concat) T::AccountId => (Vec<(T::AccountId, T::ProxyType)>, BalanceOf); + pub Proxies: map hasher(twox_64_concat) T::AccountId + => (Vec>, BalanceOf); + + /// The announcements made by the proxy (key). + pub Announcements: map hasher(twox_64_concat) T::AccountId + => (Vec, T::BlockNumber>>, BalanceOf); } } decl_error! { pub enum Error for Module { - /// There are too many proxies registered. + /// There are too many proxies registered or too many announcements pending. TooMany, /// Proxy registration not found. NotFound, @@ -132,6 +179,8 @@ decl_error! { Duplicate, /// Call may not be made by proxy because it may escalate its privileges. NoPermission, + /// Announcement, if made at all, was made too recently. + Unannounced, } } @@ -139,13 +188,16 @@ decl_event! { /// Events type. pub enum Event where AccountId = ::AccountId, - ProxyType = ::ProxyType + ProxyType = ::ProxyType, + Hash = CallHashOf, { - /// A proxy was executed correctly, with the given [result]. + /// A proxy was executed correctly, with the given \[result\]. ProxyExecuted(DispatchResult), /// Anonymous account has been created by new proxy with given - /// disambiguation index and proxy type. [anonymous, who, proxy_type, disambiguation_index] + /// disambiguation index and proxy type. \[anonymous, who, proxy_type, disambiguation_index\] AnonymousCreated(AccountId, AccountId, ProxyType, u16), + /// An announcement was placed to make a call in the future. \[real, proxy, call_hash\] + Announced(AccountId, AccountId, Hash), } } @@ -165,9 +217,36 @@ decl_module! { /// The maximum amount of proxies allowed for a single account. const MaxProxies: u16 = T::MaxProxies::get(); + /// `MaxPending` metadata shadow. + const MaxPending: u32 = T::MaxPending::get(); + + /// `AnnouncementDepositBase` metadata shadow. + const AnnouncementDepositBase: BalanceOf = T::AnnouncementDepositBase::get(); + + /// `AnnouncementDepositFactor` metadata shadow. + const AnnouncementDepositFactor: BalanceOf = T::AnnouncementDepositFactor::get(); + + fn on_runtime_upgrade() -> Weight { + Proxies::::translate::<(Vec<(T::AccountId, T::ProxyType)>, BalanceOf), _>( + |_, (targets, deposit)| Some(( + targets.into_iter() + .map(|(a, t)| ProxyDefinition { + delegate: a, + proxy_type: t, + delay: Zero::zero(), + }) + .collect::>(), + deposit, + )) + ); + T::MaximumBlockWeight::get() + } + /// Dispatch the given `call` from an account that the sender is authorised for through /// `add_proxy`. /// + /// Removes any corresponding announcement(s). + /// /// The dispatch origin for this call must be _Signed_. /// /// Parameters: @@ -176,43 +255,24 @@ decl_module! { /// - `call`: The call to be made by the `real` account. /// /// # - /// P is the number of proxies the user has - /// - Base weight: 19.87 + .141 * P µs - /// - DB weight: 1 storage read. - /// - Plus the weight of the `call` + /// Weight is a function of the number of proxies the user has (P). 
/// # #[weight = { let di = call.get_dispatch_info(); - (T::DbWeight::get().reads(1) - .saturating_add(di.weight) - .saturating_add(20 * WEIGHT_PER_MICROS) - .saturating_add((140 * WEIGHT_PER_NANOS).saturating_mul(T::MaxProxies::get().into())), + (T::WeightInfo::proxy(T::MaxProxies::get().into()) + .saturating_add(di.weight), di.class) }] fn proxy(origin, real: T::AccountId, force_proxy_type: Option, - call: Box<::Call> + call: Box<::Call>, ) { let who = ensure_signed(origin)?; - let (_, proxy_type) = Proxies::::get(&real).0.into_iter() - .find(|x| &x.0 == &who && force_proxy_type.as_ref().map_or(true, |y| &x.1 == y)) - .ok_or(Error::::NotProxy)?; + let def = Self::find_proxy(&real, &who, force_proxy_type)?; + ensure!(def.delay.is_zero(), Error::::Unannounced); - // This is a freshly authenticated new account, the origin restrictions doesn't apply. - let mut origin: T::Origin = frame_system::RawOrigin::Signed(real).into(); - origin.add_filter(move |c: &::Call| { - let c = ::Call::from_ref(c); - match c.is_sub_type() { - Some(Call::add_proxy(_, ref pt)) | Some(Call::remove_proxy(_, ref pt)) - if !proxy_type.is_superset(&pt) => false, - Some(Call::remove_proxies(..)) | Some(Call::kill_anonymous(..)) - if proxy_type != T::ProxyType::default() => false, - _ => proxy_type.filter(c) - } - }); - let e = call.dispatch(origin); - Self::deposit_event(RawEvent::ProxyExecuted(e.map(|_| ()).map_err(|e| e.error))); + Self::do_proxy(def, real, *call); } /// Register a proxy account for the sender that is able to make calls on its behalf. @@ -224,21 +284,20 @@ decl_module! { /// - `proxy_type`: The permissions allowed for this proxy account. /// /// # - /// P is the number of proxies the user has - /// - Base weight: 17.48 + .176 * P µs - /// - DB weight: 1 storage read and write. + /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) - .saturating_add(18 * WEIGHT_PER_MICROS) - .saturating_add((200 * WEIGHT_PER_NANOS).saturating_mul(T::MaxProxies::get().into())) - ] - fn add_proxy(origin, proxy: T::AccountId, proxy_type: T::ProxyType) -> DispatchResult { + #[weight = T::WeightInfo::add_proxy(T::MaxProxies::get().into())] + fn add_proxy(origin, + delegate: T::AccountId, + proxy_type: T::ProxyType, + delay: T::BlockNumber, + ) -> DispatchResult { let who = ensure_signed(origin)?; Proxies::::try_mutate(&who, |(ref mut proxies, ref mut deposit)| { ensure!(proxies.len() < T::MaxProxies::get() as usize, Error::::TooMany); - let typed_proxy = (proxy, proxy_type); - let i = proxies.binary_search(&typed_proxy).err().ok_or(Error::::Duplicate)?; - proxies.insert(i, typed_proxy); + let proxy_def = ProxyDefinition { delegate, proxy_type, delay }; + let i = proxies.binary_search(&proxy_def).err().ok_or(Error::::Duplicate)?; + proxies.insert(i, proxy_def); let new_deposit = T::ProxyDepositBase::get() + T::ProxyDepositFactor::get() * (proxies.len() as u32).into(); if new_deposit > *deposit { @@ -260,20 +319,19 @@ decl_module! { /// - `proxy_type`: The permissions currently enabled for the removed proxy account. /// /// # - /// P is the number of proxies the user has - /// - Base weight: 14.37 + .164 * P µs - /// - DB weight: 1 storage read and write. + /// Weight is a function of the number of proxies the user has (P). 
/// # - #[weight = T::DbWeight::get().reads_writes(1, 1) - .saturating_add(14 * WEIGHT_PER_MICROS) - .saturating_add((160 * WEIGHT_PER_NANOS).saturating_mul(T::MaxProxies::get().into())) - ] - fn remove_proxy(origin, proxy: T::AccountId, proxy_type: T::ProxyType) -> DispatchResult { + #[weight = T::WeightInfo::remove_proxy(T::MaxProxies::get().into())] + fn remove_proxy(origin, + delegate: T::AccountId, + proxy_type: T::ProxyType, + delay: T::BlockNumber, + ) -> DispatchResult { let who = ensure_signed(origin)?; Proxies::::try_mutate_exists(&who, |x| { let (mut proxies, old_deposit) = x.take().ok_or(Error::::NotFound)?; - let typed_proxy = (proxy, proxy_type); - let i = proxies.binary_search(&typed_proxy).ok().ok_or(Error::::NotFound)?; + let proxy_def = ProxyDefinition { delegate, proxy_type, delay }; + let i = proxies.binary_search(&proxy_def).ok().ok_or(Error::::NotFound)?; proxies.remove(i); let new_deposit = if proxies.is_empty() { BalanceOf::::zero() @@ -300,14 +358,9 @@ decl_module! { /// the unreserved fees will be inaccessible. **All access to this account will be lost.** /// /// # - /// P is the number of proxies the user has - /// - Base weight: 13.73 + .129 * P µs - /// - DB weight: 1 storage read and write. + /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) - .saturating_add(14 * WEIGHT_PER_MICROS) - .saturating_add((130 * WEIGHT_PER_NANOS).saturating_mul(T::MaxProxies::get().into())) - ] + #[weight = T::WeightInfo::remove_proxies(T::MaxProxies::get().into())] fn remove_proxies(origin) { let who = ensure_signed(origin)?; let (_, old_deposit) = Proxies::::take(&who); @@ -325,6 +378,8 @@ decl_module! { /// - `index`: A disambiguation index, in case this is called multiple times in the same /// transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just /// want to use `0`. + /// - `delay`: The announcement period required of the initial proxy. Will generally be + /// zero. /// /// Fails with `Duplicate` if this has already been called in this transaction, from the /// same sender, with the same parameters. @@ -332,22 +387,23 @@ decl_module! { /// Fails if there are insufficient funds to pay for deposit. /// /// # - /// P is the number of proxies the user has - /// - Base weight: 36.48 + .039 * P µs - /// - DB weight: 1 storage read and write. + /// Weight is a function of the number of proxies the user has (P). 
/// # - #[weight = T::DbWeight::get().reads_writes(1, 1) - .saturating_add(36 * WEIGHT_PER_MICROS) - .saturating_add((40 * WEIGHT_PER_NANOS).saturating_mul(T::MaxProxies::get().into())) - ] - fn anonymous(origin, proxy_type: T::ProxyType, index: u16) { + /// TODO: Might be over counting 1 read + #[weight = T::WeightInfo::anonymous(T::MaxProxies::get().into())] + fn anonymous(origin, proxy_type: T::ProxyType, delay: T::BlockNumber, index: u16) { let who = ensure_signed(origin)?; let anonymous = Self::anonymous_account(&who, &proxy_type, index, None); ensure!(!Proxies::::contains_key(&anonymous), Error::::Duplicate); let deposit = T::ProxyDepositBase::get() + T::ProxyDepositFactor::get(); T::Currency::reserve(&who, deposit)?; - Proxies::::insert(&anonymous, (vec![(who.clone(), proxy_type.clone())], deposit)); + let proxy_def = ProxyDefinition { + delegate: who.clone(), + proxy_type: proxy_type.clone(), + delay, + }; + Proxies::::insert(&anonymous, (vec![proxy_def], deposit)); Self::deposit_event(RawEvent::AnonymousCreated(anonymous, who, proxy_type, index)); } @@ -369,14 +425,9 @@ decl_module! { /// account whose `anonymous` call has corresponding parameters. /// /// # - /// P is the number of proxies the user has - /// - Base weight: 15.65 + .137 * P µs - /// - DB weight: 1 storage read and write. + /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) - .saturating_add(15 * WEIGHT_PER_MICROS) - .saturating_add((140 * WEIGHT_PER_NANOS).saturating_mul(T::MaxProxies::get().into())) - ] + #[weight = T::WeightInfo::kill_anonymous(T::MaxProxies::get().into())] fn kill_anonymous(origin, spawner: T::AccountId, proxy_type: T::ProxyType, @@ -393,6 +444,140 @@ decl_module! { let (_, deposit) = Proxies::::take(&who); T::Currency::unreserve(&spawner, deposit); } + + /// Publish the hash of a proxy-call that will be made in the future. + /// + /// This must be called some number of blocks before the corresponding `proxy` is attempted + /// if the delay associated with the proxy relationship is greater than zero. + /// + /// No more than `MaxPending` announcements may be made at any one time. + /// + /// This will take a deposit of `AnnouncementDepositFactor` as well as + /// `AnnouncementDepositBase` if there are no other pending announcements. + /// + /// The dispatch origin for this call must be _Signed_ and a proxy of `real`. + /// + /// Parameters: + /// - `real`: The account that the proxy will make a call on behalf of. + /// - `call_hash`: The hash of the call to be made by the `real` account. + /// + /// # + /// Weight is a function of: + /// - A: the number of announcements made. + /// - P: the number of proxies the user has. 
+ /// # + #[weight = T::WeightInfo::announce(T::MaxPending::get(), T::MaxProxies::get().into())] + fn announce(origin, real: T::AccountId, call_hash: CallHashOf) { + let who = ensure_signed(origin)?; + Proxies::::get(&real).0.into_iter() + .find(|x| &x.delegate == &who) + .ok_or(Error::::NotProxy)?; + + let announcement = Announcement { + real: real.clone(), + call_hash: call_hash.clone(), + height: system::Module::::block_number(), + }; + + Announcements::::try_mutate(&who, |(ref mut pending, ref mut deposit)| { + ensure!(pending.len() < T::MaxPending::get() as usize, Error::::TooMany); + pending.push(announcement); + Self::rejig_deposit( + &who, + *deposit, + T::AnnouncementDepositBase::get(), + T::AnnouncementDepositFactor::get(), + pending.len(), + ).map(|d| d.expect("Just pushed; pending.len() > 0; rejig_deposit returns Some; qed")) + .map(|d| *deposit = d) + })?; + Self::deposit_event(RawEvent::Announced(real, who, call_hash)); + } + + /// Remove a given announcement. + /// + /// May be called by a proxy account to remove a call they previously announced and return + /// the deposit. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `real`: The account that the proxy will make a call on behalf of. + /// - `call_hash`: The hash of the call to be made by the `real` account. + /// + /// # + /// Weight is a function of: + /// - A: the number of announcements made. + /// - P: the number of proxies the user has. + /// # + #[weight = T::WeightInfo::remove_announcement(T::MaxPending::get(), T::MaxProxies::get().into())] + fn remove_announcement(origin, real: T::AccountId, call_hash: CallHashOf) { + let who = ensure_signed(origin)?; + Self::edit_announcements(&who, |ann| ann.real != real || ann.call_hash != call_hash)?; + } + + /// Remove the given announcement of a delegate. + /// + /// May be called by a target (proxied) account to remove a call that one of their delegates + /// (`delegate`) has announced they want to execute. The deposit is returned. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `delegate`: The account that previously announced the call. + /// - `call_hash`: The hash of the call to be made. + /// + /// # + /// Weight is a function of: + /// - A: the number of announcements made. + /// - P: the number of proxies the user has. + /// # + #[weight = T::WeightInfo::reject_announcement(T::MaxPending::get(), T::MaxProxies::get().into())] + fn reject_announcement(origin, delegate: T::AccountId, call_hash: CallHashOf) { + let who = ensure_signed(origin)?; + Self::edit_announcements(&delegate, |ann| ann.real != who || ann.call_hash != call_hash)?; + } + + /// Dispatch the given `call` from an account that the sender is authorised for through + /// `add_proxy`. + /// + /// Removes any corresponding announcement(s). + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `real`: The account that the proxy will make a call on behalf of. + /// - `force_proxy_type`: Specify the exact proxy type to be used and checked for this call. + /// - `call`: The call to be made by the `real` account. + /// + /// # + /// Weight is a function of: + /// - A: the number of announcements made. + /// - P: the number of proxies the user has. 
+ /// # + #[weight = { + let di = call.get_dispatch_info(); + (T::WeightInfo::proxy_announced(T::MaxPending::get(), T::MaxProxies::get().into()) + .saturating_add(di.weight), + di.class) + }] + fn proxy_announced(origin, + delegate: T::AccountId, + real: T::AccountId, + force_proxy_type: Option, + call: Box<::Call>, + ) { + ensure_signed(origin)?; + let def = Self::find_proxy(&real, &delegate, force_proxy_type)?; + + let call_hash = T::CallHasher::hash_of(&call); + let now = system::Module::::block_number(); + Self::edit_announcements(&delegate, |ann| + ann.real != real || ann.call_hash != call_hash || now.saturating_sub(ann.height) < def.delay + ).map_err(|_| Error::::Unannounced)?; + + Self::do_proxy(def, real, *call); + } } } @@ -411,4 +596,82 @@ impl Module { .using_encoded(blake2_256); T::AccountId::decode(&mut &entropy[..]).unwrap_or_default() } + + fn rejig_deposit( + who: &T::AccountId, + old_deposit: BalanceOf, + base: BalanceOf, + factor: BalanceOf, + len: usize, + ) -> Result>, DispatchError> { + let new_deposit = if len == 0 { + BalanceOf::::zero() + } else { + base + factor * (len as u32).into() + }; + if new_deposit > old_deposit { + T::Currency::reserve(&who, new_deposit - old_deposit)?; + } else if new_deposit < old_deposit { + T::Currency::unreserve(&who, old_deposit - new_deposit); + } + Ok(if len == 0 { + None + } else { + Some(new_deposit) + }) + } + + fn edit_announcements< + F: FnMut(&Announcement, T::BlockNumber>) -> bool + >(delegate: &T::AccountId, f: F) -> DispatchResult { + Announcements::::try_mutate_exists(delegate, |x| { + let (mut pending, old_deposit) = x.take().ok_or(Error::::NotFound)?; + let orig_pending_len = pending.len(); + pending.retain(f); + ensure!(orig_pending_len > pending.len(), Error::::NotFound); + *x = Self::rejig_deposit( + delegate, + old_deposit, + T::AnnouncementDepositBase::get(), + T::AnnouncementDepositFactor::get(), + pending.len(), + )?.map(|deposit| (pending, deposit)); + Ok(()) + }) + } + + fn find_proxy( + real: &T::AccountId, + delegate: &T::AccountId, + force_proxy_type: Option, + ) -> Result, DispatchError> { + let f = |x: &ProxyDefinition| -> bool { + &x.delegate == delegate && force_proxy_type.as_ref().map_or(true, |y| &x.proxy_type == y) + }; + Ok(Proxies::::get(real).0.into_iter().find(f).ok_or(Error::::NotProxy)?) + } + + fn do_proxy( + def: ProxyDefinition, + real: T::AccountId, + call: ::Call, + ) { + // This is a freshly authenticated new account, the origin restrictions doesn't apply. + let mut origin: T::Origin = frame_system::RawOrigin::Signed(real).into(); + origin.add_filter(move |c: &::Call| { + let c = ::Call::from_ref(c); + // We make sure the proxy call does access this pallet to change modify proxies. + match c.is_sub_type() { + // Proxy call cannot add or remove a proxy with more permissions than it already has. + Some(Call::add_proxy(_, ref pt, _)) | Some(Call::remove_proxy(_, ref pt, _)) + if !def.proxy_type.is_superset(&pt) => false, + // Proxy call cannot remove all proxies or kill anonymous proxies unless it has full permissions. 
+ Some(Call::remove_proxies(..)) | Some(Call::kill_anonymous(..)) + if def.proxy_type != T::ProxyType::default() => false, + _ => def.proxy_type.filter(c) + } + }); + let e = call.dispatch(origin); + Self::deposit_event(RawEvent::ProxyExecuted(e.map(|_| ()).map_err(|e| e.error))); + } } diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index 11f11e24d47725eac8a87fb37f37933aad199c8d..bcf3b678ed64438df8735234a77ef6ed61925322 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -82,7 +82,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -92,6 +92,7 @@ parameter_types! { pub const ExistentialDeposit: u64 = 1; } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u64; type Event = TestEvent; type DustRemoval = (); @@ -108,6 +109,9 @@ parameter_types! { pub const ProxyDepositBase: u64 = 1; pub const ProxyDepositFactor: u64 = 1; pub const MaxProxies: u16 = 4; + pub const MaxPending: u32 = 2; + pub const AnnouncementDepositBase: u64 = 1; + pub const AnnouncementDepositFactor: u64 = 1; } #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug)] pub enum ProxyType { @@ -148,6 +152,10 @@ impl Trait for Test { type ProxyDepositFactor = ProxyDepositFactor; type MaxProxies = MaxProxies; type WeightInfo = (); + type CallHasher = BlakeTwo256; + type MaxPending = MaxPending; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; } type System = frame_system::Module; @@ -189,13 +197,134 @@ fn expect_events(e: Vec) { assert_eq!(last_events(e.len()), e); } +#[test] +fn announcement_works() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); + assert_eq!(Balances::reserved_balance(3), 0); + + assert_ok!(Proxy::announce(Origin::signed(3), 1, [1; 32].into())); + assert_eq!(Announcements::::get(3), (vec![Announcement { + real: 1, + call_hash: [1; 32].into(), + height: 1, + }], 2)); + assert_eq!(Balances::reserved_balance(3), 2); + + assert_ok!(Proxy::announce(Origin::signed(3), 2, [2; 32].into())); + assert_eq!(Announcements::::get(3), (vec![ + Announcement { + real: 1, + call_hash: [1; 32].into(), + height: 1, + }, + Announcement { + real: 2, + call_hash: [2; 32].into(), + height: 1, + }, + ], 3)); + assert_eq!(Balances::reserved_balance(3), 3); + + assert_noop!(Proxy::announce(Origin::signed(3), 2, [3; 32].into()), Error::::TooMany); + }); +} + +#[test] +fn remove_announcement_works() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::announce(Origin::signed(3), 1, [1; 32].into())); + assert_ok!(Proxy::announce(Origin::signed(3), 2, [2; 32].into())); + let e = Error::::NotFound; + assert_noop!(Proxy::remove_announcement(Origin::signed(3), 1, [0; 32].into()), e); + assert_ok!(Proxy::remove_announcement(Origin::signed(3), 1, [1; 32].into())); + assert_eq!(Announcements::::get(3), (vec![Announcement { + real: 2, + call_hash: [2; 32].into(), + height: 1, + }], 2)); + assert_eq!(Balances::reserved_balance(3), 2); + }); +} + +#[test] +fn 
reject_announcement_works() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::announce(Origin::signed(3), 1, [1; 32].into())); + assert_ok!(Proxy::announce(Origin::signed(3), 2, [2; 32].into())); + let e = Error::::NotFound; + assert_noop!(Proxy::reject_announcement(Origin::signed(1), 3, [0; 32].into()), e); + let e = Error::::NotFound; + assert_noop!(Proxy::reject_announcement(Origin::signed(4), 3, [1; 32].into()), e); + assert_ok!(Proxy::reject_announcement(Origin::signed(1), 3, [1; 32].into())); + assert_eq!(Announcements::::get(3), (vec![Announcement { + real: 2, + call_hash: [2; 32].into(), + height: 1, + }], 2)); + assert_eq!(Balances::reserved_balance(3), 2); + }); +} + +#[test] +fn announcer_must_be_proxy() { + new_test_ext().execute_with(|| { + assert_noop!(Proxy::announce(Origin::signed(2), 1, H256::zero()), Error::::NotProxy); + }); +} + +#[test] +fn delayed_requires_pre_announcement() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 1)); + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); + let e = Error::::Unannounced; + assert_noop!(Proxy::proxy(Origin::signed(2), 1, None, call.clone()), e); + let e = Error::::Unannounced; + assert_noop!(Proxy::proxy_announced(Origin::signed(0), 2, 1, None, call.clone()), e); + let call_hash = BlakeTwo256::hash_of(&call); + assert_ok!(Proxy::announce(Origin::signed(2), 1, call_hash)); + system::Module::::set_block_number(2); + assert_ok!(Proxy::proxy_announced(Origin::signed(0), 2, 1, None, call.clone())); + }); +} + +#[test] +fn proxy_announced_removes_announcement_and_returns_deposit() { + new_test_ext().execute_with(|| { + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); + let call_hash = BlakeTwo256::hash_of(&call); + assert_ok!(Proxy::announce(Origin::signed(3), 1, call_hash)); + assert_ok!(Proxy::announce(Origin::signed(3), 2, call_hash)); + // Too early to execute announced call + let e = Error::::Unannounced; + assert_noop!(Proxy::proxy_announced(Origin::signed(0), 3, 1, None, call.clone()), e); + + system::Module::::set_block_number(2); + assert_ok!(Proxy::proxy_announced(Origin::signed(0), 3, 1, None, call.clone())); + assert_eq!(Announcements::::get(3), (vec![Announcement { + real: 2, + call_hash, + height: 1, + }], 2)); + assert_eq!(Balances::reserved_balance(3), 2); + }); +} + #[test] fn filtering_works() { new_test_ext().execute_with(|| { Balances::mutate_account(&1, |a| a.free = 1000); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any)); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::JustTransfer)); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::JustTransfer, 0)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); @@ -228,7 +357,7 @@ fn filtering_works() { RawEvent::ProxyExecuted(Ok(())).into(), ]); - let inner = Box::new(Call::Proxy(ProxyCall::add_proxy(5, ProxyType::Any))); + let inner 
= Box::new(Call::Proxy(ProxyCall::add_proxy(5, ProxyType::Any, 0))); let call = Box::new(Call::Utility(UtilityCall::batch(vec![*inner]))); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); expect_events(vec![UtilityEvent::BatchCompleted.into(), RawEvent::ProxyExecuted(Ok(())).into()]); @@ -253,24 +382,24 @@ fn filtering_works() { #[test] fn add_remove_proxies_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any)); - assert_noop!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any), Error::::Duplicate); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); + assert_noop!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0), Error::::Duplicate); assert_eq!(Balances::reserved_balance(1), 2); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::JustTransfer)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); assert_eq!(Balances::reserved_balance(1), 3); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); assert_eq!(Balances::reserved_balance(1), 5); - assert_noop!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::Any), Error::::TooMany); - assert_noop!(Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::JustTransfer), Error::::NotFound); - assert_ok!(Proxy::remove_proxy(Origin::signed(1), 4, ProxyType::JustUtility)); + assert_noop!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::Any, 0), Error::::TooMany); + assert_noop!(Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::JustTransfer, 0), Error::::NotFound); + assert_ok!(Proxy::remove_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!(Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::Any)); + assert_ok!(Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(1), 3); - assert_ok!(Proxy::remove_proxy(Origin::signed(1), 2, ProxyType::Any)); + assert_ok!(Proxy::remove_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(1), 2); - assert_ok!(Proxy::remove_proxy(Origin::signed(1), 2, ProxyType::JustTransfer)); + assert_ok!(Proxy::remove_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); assert_eq!(Balances::reserved_balance(1), 0); }); } @@ -278,10 +407,10 @@ fn add_remove_proxies_works() { #[test] fn cannot_add_proxy_without_balance() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(Origin::signed(5), 3, ProxyType::Any)); + assert_ok!(Proxy::add_proxy(Origin::signed(5), 3, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(5), 2); assert_noop!( - Proxy::add_proxy(Origin::signed(5), 4, ProxyType::Any), + Proxy::add_proxy(Origin::signed(5), 4, ProxyType::Any, 0), BalancesError::::InsufficientBalance ); }); @@ -290,8 +419,8 @@ fn cannot_add_proxy_without_balance() { #[test] fn proxying_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::JustTransfer)); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); + assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); let call = 
Box::new(Call::Balances(BalancesCall::transfer(6, 1))); assert_noop!(Proxy::proxy(Origin::signed(4), 1, None, call.clone()), Error::::NotProxy); @@ -319,21 +448,21 @@ fn proxying_works() { #[test] fn anonymous_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0)); + assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); let anon = Proxy::anonymous_account(&1, &ProxyType::Any, 0, None); expect_event(RawEvent::AnonymousCreated(anon.clone(), 1, ProxyType::Any, 0)); // other calls to anonymous allowed as long as they're not exactly the same. - assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::JustTransfer, 0)); - assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 1)); + assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::JustTransfer, 0, 0)); + assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 1)); let anon2 = Proxy::anonymous_account(&2, &ProxyType::Any, 0, None); - assert_ok!(Proxy::anonymous(Origin::signed(2), ProxyType::Any, 0)); - assert_noop!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0), Error::::Duplicate); + assert_ok!(Proxy::anonymous(Origin::signed(2), ProxyType::Any, 0, 0)); + assert_noop!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0), Error::::Duplicate); System::set_extrinsic_index(1); - assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0)); + assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); System::set_extrinsic_index(0); System::set_block_number(2); - assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0)); + assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); assert_ok!(Balances::transfer(Origin::signed(3), anon, 5)); diff --git a/frame/randomness-collective-flip/Cargo.toml b/frame/randomness-collective-flip/Cargo.toml index 28a16dc6411b65da46d9801c92cb7cf9308c469d..d35f6960af5d989cb688f0b002c071a07507b64c 100644 --- a/frame/randomness-collective-flip/Cargo.toml +++ b/frame/randomness-collective-flip/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-randomness-collective-flip" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME randomness collective flip pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,14 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] safe-mix = { version = "1.0", default-features = false } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-core = { 
version = "2.0.0-rc5", path = "../../primitives/core" } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-io = { version = "2.0.0", path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/randomness-collective-flip/README.md b/frame/randomness-collective-flip/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2af18d3d2f7b589c55e738b95b358542661e3c07 --- /dev/null +++ b/frame/randomness-collective-flip/README.md @@ -0,0 +1,38 @@ +# Randomness Module + +The Randomness Collective Flip module provides a [`random`](https://docs.rs/pallet-randomness-collective-flip/latest/pallet_randomness_collective_flip/struct.Module.html#method.random) +function that generates low-influence random values based on the block hashes from the previous +`81` blocks. Low-influence randomness can be useful when defending against relatively weak +adversaries. Using this pallet as a randomness source is advisable primarily in low-security +situations like testing. + +## Public Functions + +See the [`Module`](https://docs.rs/pallet-randomness-collective-flip/latest/pallet_randomness_collective_flip/struct.Module.html) struct for details of publicly available functions. + +## Usage + +### Prerequisites + +Import the Randomness Collective Flip module and derive your module's configuration trait from +the system trait. + +### Example - Get random seed for the current block + +```rust +use frame_support::{decl_module, dispatch, traits::Randomness}; + +pub trait Trait: frame_system::Trait {} + +decl_module! { + pub struct Module for enum Call where origin: T::Origin { + #[weight = 0] + pub fn random_module_example(origin) -> dispatch::DispatchResult { + let _random_value = >::random(&b"my context"[..]); + Ok(()) + } + } +} +``` + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index 74a08c0150935003c5e0b49d1e7cccbda089a339..6b1b9f4f37448b4fbc82943a51d6e82222272a3f 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -178,7 +178,7 @@ mod tests { type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index 6302a817171c0fd983c93aed25142d8b57db966a..0ba2f5437c614746a3fe21c4b34c246a449ce13e 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-recovery" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME account recovery pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,15 +16,15 @@ targets = ["x86_64-unknown-linux-gnu"] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { 
version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/recovery/README.md b/frame/recovery/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b6d3ae5aceeb38471559dc7657febe6b40b5e720 --- /dev/null +++ b/frame/recovery/README.md @@ -0,0 +1,134 @@ +# Recovery Pallet + +- [`recovery::Trait`](https://docs.rs/pallet-recovery/latest/pallet_recovery/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-recovery/latest/pallet_recovery/enum.Call.html) + +## Overview + +The Recovery pallet is an M-of-N social recovery tool for users to gain +access to their accounts if the private key or other authentication mechanism +is lost. Through this pallet, a user is able to make calls on-behalf-of another +account which they have recovered. The recovery process is protected by trusted +"friends" whom the original account owner chooses. A threshold (M) out of N +friends are needed to give another account access to the recoverable account. + +### Recovery Configuration + +The recovery process for each recoverable account can be configured by the account owner. +They are able to choose: +* `friends` - The list of friends that the account owner trusts to protect the + recovery process for their account. +* `threshold` - The number of friends that need to approve a recovery process for + the account to be successfully recovered. +* `delay_period` - The minimum number of blocks after the beginning of the recovery + process that need to pass before the account can be successfully recovered. + +There is a configurable deposit that all users need to pay to create a recovery +configuration. This deposit is composed of a base deposit plus a multiplier for +the number of friends chosen. This deposit is returned in full when the account +owner removes their recovery configuration. + +### Recovery Life Cycle + +The intended life cycle of a successful recovery takes the following steps: +1. The account owner calls `create_recovery` to set up a recovery configuration + for their account. +2. At some later time, the account owner loses access to their account and wants + to recover it. Likely, they will need to create a new account and fund it with + enough balance to support the transaction fees and the deposit for the + recovery process. +3. Using this new account, they call `initiate_recovery`. +4. Then the account owner would contact their configured friends to vouch for + the recovery attempt. 
The account owner would provide their old account id + and the new account id, and friends would call `vouch_recovery` with those + parameters. +5. Once a threshold number of friends have vouched for the recovery attempt, + the account owner needs to wait until the delay period has passed, starting + when they initiated the recovery process. +6. Now the account owner is able to call `claim_recovery`, which subsequently + allows them to call `as_recovered` and directly make calls on-behalf-of the lost + account. +7. Using the now recovered account, the account owner can call `close_recovery` + on the recovery process they opened, reclaiming the recovery deposit they + placed. +8. The account owner should then call `remove_recovery` to remove the recovery + configuration on the recovered account and reclaim the recovery configuration + deposit they placed. +9. Using `as_recovered`, the account owner is able to call any other pallets + to clean up their state and reclaim any reserved or locked funds. They + can then transfer all funds from the recovered account to the new account. +10. When the recovered account becomes reaped (i.e. its free and reserved + balance drops to zero), the final recovery link is removed. + +### Malicious Recovery Attempts + +Initializing the recovery process for a recoverable account is open and +permissionless. However, the recovery deposit is an economic deterrent that +should disincentivize would-be attackers from trying to maliciously recover +accounts. + +The recovery deposit can always be claimed by the account which is trying +to be recovered. In the case of a malicious recovery attempt, the account +owner who still has access to their account can claim the deposit and +essentially punish the malicious user. + +Furthermore, the malicious recovery attempt can only be successful if the +attacker is also able to get enough friends to vouch for the recovery attempt. +In the case where the account owner prevents a malicious recovery process, +this pallet makes it near-zero cost to re-configure the recovery settings and +remove/replace friends who are acting inappropriately. + +### Safety Considerations + +It is important to note that this is a powerful pallet that can compromise the +security of an account if used incorrectly. Some recommended practices for users +of this pallet are: + +* Configure a significant `delay_period` for your recovery process: As long as you + have access to your recoverable account, you need only check the blockchain once + every `delay_period` blocks to ensure that no recovery attempt is successful + against your account. Using off-chain notification systems can help with this, + but ultimately, setting a large `delay_period` means that even the most skilled + attacker will need to wait this long before they can access your account. +* Use a high threshold of approvals: Setting a value of 1 for the threshold means + that any of your friends would be able to recover your account. They would + simply need to start a recovery process and approve their own process. Similarly, + a threshold of 2 would mean that any 2 friends could work together to gain + access to your account. The only way to protect against these kinds of attacks + is to choose a high threshold of approvals and select from a diverse friend + group that would not be able to reasonably coordinate with one another.
+* Reset your configuration over time: Since the entire deposit for creating a + recovery configuration is returned to the user, the only cost of updating + your recovery configuration is the transaction fees for the calls. Thus, + it is strongly encouraged to regularly update your recovery configuration + as your life changes and your relationships with new and existing friends + change as well. + +## Interface + +### Dispatchable Functions + +#### For General Users + +* `create_recovery` - Create a recovery configuration for your account and make it recoverable. +* `initiate_recovery` - Start the recovery process for a recoverable account. + +#### For Friends of a Recoverable Account +* `vouch_recovery` - As a `friend` of a recoverable account, vouch for a recovery attempt on the account. + +#### For a User Who Successfully Recovered an Account + +* `claim_recovery` - Claim access to the account that you have successfully completed the recovery process for. +* `as_recovered` - Send a transaction as an account that you have recovered. See other functions below. + +#### For the Recoverable Account + +* `close_recovery` - Close an active recovery process for your account and reclaim the recovery deposit. +* `remove_recovery` - Remove the recovery configuration from the account, making it un-recoverable. + +#### For Super Users + +* `set_recovered` - The ROOT origin is able to skip the recovery process and directly allow + one account to access another. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index 1c0dd5041380f750eb1054eef8c9ef6206cd10fc..b3aad8433eb3c671457f490a640f09c7758155f6 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -264,21 +264,21 @@ decl_event! { pub enum Event where AccountId = ::AccountId, { - /// A recovery process has been set up for an [account]. + /// A recovery process has been set up for an \[account\]. RecoveryCreated(AccountId), /// A recovery process has been initiated for lost account by rescuer account. - /// [lost, rescuer] + /// \[lost, rescuer\] RecoveryInitiated(AccountId, AccountId), /// A recovery process for lost account by rescuer account has been vouched for by sender. - /// [lost, rescuer, sender] + /// \[lost, rescuer, sender\] RecoveryVouched(AccountId, AccountId, AccountId), /// A recovery process for lost account by rescuer account has been closed. - /// [lost, rescuer] + /// \[lost, rescuer\] RecoveryClosed(AccountId, AccountId), /// Lost account has been successfully recovered by rescuer account. - /// [lost, rescuer] + /// \[lost, rescuer\] AccountRecovered(AccountId, AccountId), - /// A recovery process has been removed for an [account]. + /// A recovery process has been removed for an \[account\]. RecoveryRemoved(AccountId), } } diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index 6b8ef169c0076ee441e19d6d849063b1b09e7b5b..35373562487f72dd1496521b8459fbcb60d7a332 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -79,7 +79,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -91,6 +91,7 @@ parameter_types!
{ } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u128; type DustRemoval = (); type Event = TestEvent; diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml index 14a310ebe504138602e388a7ba832c8e91b86351..613762bb689e971413285c41c5b6d69078c8e3d1 100644 --- a/frame/scheduler/Cargo.toml +++ b/frame/scheduler/Cargo.toml @@ -1,27 +1,28 @@ [package] name = "pallet-scheduler" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME example pallet" +readme = "README.md" [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core", default-features = false } -substrate-test-utils = { version = "2.0.0-rc5", path = "../../test-utils" } +sp-core = { version = "2.0.0", path = "../../primitives/core", default-features = false } +substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } [features] default = ["std"] diff --git a/frame/scheduler/README.md b/frame/scheduler/README.md new file mode 100644 index 0000000000000000000000000000000000000000..47beb71e3a0d1eeeec66d4df9a54aec775a94120 --- /dev/null +++ b/frame/scheduler/README.md @@ -0,0 +1,34 @@ +# Scheduler +A module for scheduling dispatches. + +- [`scheduler::Trait`](https://docs.rs/pallet-scheduler/latest/pallet_scheduler/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-scheduler/latest/pallet_scheduler/enum.Call.html) +- [`Module`](https://docs.rs/pallet-scheduler/latest/pallet_scheduler/struct.Module.html) + +## Overview + +This module exposes capabilities for scheduling dispatches to occur at a +specified block number or at a specified period. These scheduled dispatches +may be named or anonymous and may be canceled. + +**NOTE:** Scheduled calls are dispatched with the default filter +for their origin: namely `frame_system::Trait::BaseCallFilter` for all origins +except root, which gets no filter. The filter contained in the origin that was +used to call `fn schedule` is not applied. + +If a call is scheduled using a proxy or any other mechanism that adds a filter, +that filter will not be used when dispatching the scheduled call.
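+
+As a rough configuration sketch (not taken from any runtime in this repository), a
+runtime could wire the pallet up as follows. `Runtime`, `Event`, `Origin`,
+`OriginCaller`, `Call`, `AccountId` and `MaximumBlockWeight` are assumed to be
+provided by the surrounding `construct_runtime!` / `frame_system` setup, and the
+concrete values are illustrative only:
+
+```rust
+use frame_support::{parameter_types, weights::Weight};
+use frame_system::EnsureRoot;
+use sp_runtime::Perbill;
+
+parameter_types! {
+    // Portion of the block weight that scheduled dispatches may consume.
+    pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * MaximumBlockWeight::get();
+    // Soft bound used for weight estimation; the pallet only warns when a
+    // block's agenda grows beyond it.
+    pub const MaxScheduledPerBlock: u32 = 50;
+}
+
+impl pallet_scheduler::Trait for Runtime {
+    type Event = Event;
+    type Origin = Origin;
+    type PalletsOrigin = OriginCaller;
+    type Call = Call;
+    type MaximumWeight = MaximumSchedulerWeight;
+    type ScheduleOrigin = EnsureRoot<AccountId>;
+    type MaxScheduledPerBlock = MaxScheduledPerBlock;
+    type WeightInfo = ();
+}
+```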
+ +## Interface + +### Dispatchable Functions + +* `schedule` - schedule a dispatch, which may be periodic, to occur at a + specified block and with a specified priority. +* `cancel` - cancel a scheduled dispatch, specified by block number and + index. +* `schedule_named` - augments the `schedule` interface with an additional + `Vec` parameter that can be used for identification. +* `cancel_named` - the named complement to the cancel function. + +License: Unlicense \ No newline at end of file diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index 847460fe85a502b0f1a3dea0f46e6b42fdd21604..753e9244628ad5cc3e8e3c77daf951212becb22a 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -28,7 +28,6 @@ use frame_benchmarking::benchmarks; use crate::Module as Scheduler; use frame_system::Module as System; -const MAX_SCHEDULED: u32 = 50; const BLOCK_NUMBER: u32 = 2; // Add `n` named items to the schedule @@ -56,7 +55,7 @@ benchmarks! { _ { } schedule { - let s in 0 .. MAX_SCHEDULED; + let s in 0 .. T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); let periodic = Some((T::BlockNumber::one(), 100)); let priority = 0; @@ -73,7 +72,7 @@ benchmarks! { } cancel { - let s in 1 .. MAX_SCHEDULED; + let s in 1 .. T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; @@ -92,7 +91,7 @@ benchmarks! { } schedule_named { - let s in 0 .. MAX_SCHEDULED; + let s in 0 .. T::MaxScheduledPerBlock::get(); let id = s.encode(); let when = BLOCK_NUMBER.into(); let periodic = Some((T::BlockNumber::one(), 100)); @@ -110,7 +109,7 @@ benchmarks! { } cancel_named { - let s in 1 .. MAX_SCHEDULED; + let s in 1 .. T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; @@ -127,8 +126,10 @@ benchmarks! { ); } + // TODO [#7141]: Make this more complex and flexible so it can be used in automation. + #[extra] on_initialize { - let s in 0 .. MAX_SCHEDULED; + let s in 0 .. T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } diff --git a/frame/scheduler/src/default_weights.rs b/frame/scheduler/src/default_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..920de1d37a07c736042e334077f9480ffb49d431 --- /dev/null +++ b/frame/scheduler/src/default_weights.rs @@ -0,0 +1,50 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn schedule(s: u32, ) -> Weight { + (37_835_000 as Weight) + .saturating_add((81_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn cancel(s: u32, ) -> Weight { + (34_707_000 as Weight) + .saturating_add((3_125_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn schedule_named(s: u32, ) -> Weight { + (48_065_000 as Weight) + .saturating_add((110_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn cancel_named(s: u32, ) -> Weight { + (38_776_000 as Weight) + .saturating_add((3_138_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index ccd1a4c43b6eaa72709123649913149abcf3ea83..54d3e1373b9f9b49be9c6db92c8a4d258c8fc8d3 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -52,6 +52,7 @@ #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; +mod default_weights; use sp_std::{prelude::*, marker::PhantomData, borrow::Borrow}; use codec::{Encode, Decode, Codec}; @@ -69,15 +70,6 @@ pub trait WeightInfo { fn cancel(s: u32, ) -> Weight; fn schedule_named(s: u32, ) -> Weight; fn cancel_named(s: u32, ) -> Weight; - fn on_initialize(s: u32, ) -> Weight; -} - -impl WeightInfo for () { - fn schedule(_s: u32, ) -> Weight { 1_000_000_000 } - fn cancel(_s: u32, ) -> Weight { 1_000_000_000 } - fn schedule_named(_s: u32, ) -> Weight { 1_000_000_000 } - fn cancel_named(_s: u32, ) -> Weight { 1_000_000_000 } - fn on_initialize(_s: u32, ) -> Weight { 1_000_000_000 } } /// Our pallet's configuration trait. All our types and constants go in here. If the @@ -106,6 +98,10 @@ pub trait Trait: system::Trait { /// Required origin to schedule or cancel calls. type ScheduleOrigin: EnsureOrigin<::Origin>; + /// The maximum number of scheduled calls in the queue for a single block. + /// Not strictly enforced, but used for weight estimation. + type MaxScheduledPerBlock: Get; + /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } @@ -177,11 +173,11 @@ decl_storage! { decl_event!( pub enum Event where ::BlockNumber { - /// Scheduled some task. [when, index] + /// Scheduled some task. \[when, index\] Scheduled(BlockNumber, u32), - /// Canceled some task. [when, index] + /// Canceled some task. \[when, index\] Canceled(BlockNumber, u32), - /// Dispatched some task. [task, id, result] + /// Dispatched some task. \[task, id, result\] Dispatched(TaskAddress, Option>, DispatchResult), } ); @@ -215,7 +211,7 @@ decl_module! { /// - Write: Agenda /// - Will use base weight of 25 which should be good for up to 30 scheduled calls /// # - #[weight = 25_000_000 + T::DbWeight::get().reads_writes(1, 1)] + #[weight = T::WeightInfo::schedule(T::MaxScheduledPerBlock::get())] fn schedule(origin, when: T::BlockNumber, maybe_periodic: Option>, @@ -237,7 +233,7 @@ decl_module! 
{ /// - Write: Agenda, Lookup /// - Will use base weight of 100 which should be good for up to 30 scheduled calls /// # - #[weight = 100_000_000 + T::DbWeight::get().reads_writes(1, 2)] + #[weight = T::WeightInfo::cancel(T::MaxScheduledPerBlock::get())] fn cancel(origin, when: T::BlockNumber, index: u32) { T::ScheduleOrigin::ensure_origin(origin.clone())?; let origin = ::Origin::from(origin); @@ -254,7 +250,7 @@ decl_module! { /// - Write: Agenda, Lookup /// - Will use base weight of 35 which should be good for more than 30 scheduled calls /// # - #[weight = 35_000_000 + T::DbWeight::get().reads_writes(2, 2)] + #[weight = T::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get())] fn schedule_named(origin, id: Vec, when: T::BlockNumber, @@ -279,7 +275,7 @@ decl_module! { /// - Write: Agenda, Lookup /// - Will use base weight of 100 which should be good for up to 30 scheduled calls /// # - #[weight = 100_000_000 + T::DbWeight::get().reads_writes(2, 2)] + #[weight = T::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get())] fn cancel_named(origin, id: Vec) { T::ScheduleOrigin::ensure_origin(origin.clone())?; let origin = ::Origin::from(origin); @@ -291,7 +287,7 @@ decl_module! { /// # /// Same as [`schedule`]. /// # - #[weight = 25_000_000 + T::DbWeight::get().reads_writes(1, 1)] + #[weight = T::WeightInfo::schedule(T::MaxScheduledPerBlock::get())] fn schedule_after(origin, after: T::BlockNumber, maybe_periodic: Option>, @@ -310,7 +306,7 @@ decl_module! { /// # /// Same as [`schedule_named`]. /// # - #[weight = 35_000_000 + T::DbWeight::get().reads_writes(2, 2)] + #[weight = T::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get())] fn schedule_named_after(origin, id: Vec, after: T::BlockNumber, @@ -342,16 +338,20 @@ decl_module! { .enumerate() .filter_map(|(index, s)| s.map(|inner| (index as u32, inner))) .collect::>(); + if queued.len() as u32 > T::MaxScheduledPerBlock::get() { + frame_support::debug::warn!( + "Warning: This block has more items queued in Scheduler than \ + expected from the runtime configuration. An update might be needed." + ); + } queued.sort_by_key(|(_, s)| s.priority); - let base_weight: Weight = T::DbWeight::get().reads_writes(1, 2) // Agenda + Agenda(next) - .saturating_add(10_000_000); // Base Weight + let base_weight: Weight = T::DbWeight::get().reads_writes(1, 2); // Agenda + Agenda(next) let mut total_weight: Weight = 0; queued.into_iter() .enumerate() .scan(base_weight, |cumulative_weight, (order, (index, s))| { *cumulative_weight = cumulative_weight - .saturating_add(s.call.get_dispatch_info().weight) - .saturating_add(25_000_000); // Base multiplier + .saturating_add(s.call.get_dispatch_info().weight); if s.maybe_id.is_some() { // Remove/Modify Lookup @@ -440,6 +440,25 @@ impl Module { } } + /// Helper to migrate scheduler when the pallet origin type has changed. 
+ pub fn migrate_origin + codec::Decode>() { + Agenda::::translate::< + Vec::Call, T::BlockNumber, OldOrigin, T::AccountId>>>, _ + >(|_, agenda| Some( + agenda + .into_iter() + .map(|schedule| schedule.map(|schedule| Scheduled { + maybe_id: schedule.maybe_id, + priority: schedule.priority, + call: schedule.call, + maybe_periodic: schedule.maybe_periodic, + origin: schedule.origin.into(), + _phantom: Default::default(), + })) + .collect::>() + )); + } + fn resolve_time(when: DispatchTime) -> Result { let now = frame_system::Module::::block_number(); @@ -474,6 +493,12 @@ impl Module { }); Agenda::::append(when, s); let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; + if index > T::MaxScheduledPerBlock::get() { + frame_support::debug::warn!( + "Warning: There are more items queued in the Scheduler than \ + expected from the runtime configuration. An update might be needed." + ); + } Self::deposit_event(RawEvent::Scheduled(when, index)); Ok((when, index)) @@ -558,6 +583,12 @@ impl Module { }; Agenda::::append(when, Some(s)); let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; + if index > T::MaxScheduledPerBlock::get() { + frame_support::debug::warn!( + "Warning: There are more items queued in the Scheduler than \ + expected from the runtime configuration. An update might be needed." + ); + } let address = (when, index); Lookup::::insert(&id, &address); Self::deposit_event(RawEvent::Scheduled(when, index)); @@ -696,6 +727,7 @@ mod tests { traits::{BlakeTwo256, IdentityLookup}, }; use frame_system::{EnsureOneOf, EnsureRoot, EnsureSignedBy}; + use substrate_test_utils::assert_eq_uvec; use crate as scheduler; mod logger { @@ -799,7 +831,7 @@ mod tests { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); @@ -810,6 +842,7 @@ mod tests { } parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * MaximumBlockWeight::get(); + pub const MaxScheduledPerBlock: u32 = 10; } ord_parameter_types! 
{ pub const One: u64 = 1; @@ -822,6 +855,7 @@ mod tests { type Call = Call; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureOneOf, EnsureSignedBy>; + type MaxScheduledPerBlock = MaxScheduledPerBlock; type WeightInfo = (); } type System = system::Module; @@ -1147,8 +1181,8 @@ mod tests { #[test] fn on_initialize_weight_is_correct() { new_test_ext().execute_with(|| { - let base_weight: Weight = ::DbWeight::get().reads_writes(1, 2) + 10_000_000; - let base_multiplier = 25_000_000; + let base_weight: Weight = ::DbWeight::get().reads_writes(1, 2); + let base_multiplier = 0; let named_multiplier = ::DbWeight::get().writes(1); let periodic_multiplier = ::DbWeight::get().reads_writes(1, 1); @@ -1312,8 +1346,6 @@ mod tests { #[test] fn migration_to_v2_works() { - use substrate_test_utils::assert_eq_uvec; - new_test_ext().execute_with(|| { for i in 0..3u64 { let k = i.twox_64_concat(); @@ -1415,4 +1447,118 @@ mod tests { assert_eq!(StorageVersion::get(), Releases::V2); }); } + + #[test] + fn test_migrate_origin() { + new_test_ext().execute_with(|| { + for i in 0..3u64 { + let k = i.twox_64_concat(); + let old: Vec>> = vec![ + Some(Scheduled { + maybe_id: None, + priority: i as u8 + 10, + call: Call::Logger(logger::Call::log(96, 100)), + origin: 3u32, + maybe_periodic: None, + _phantom: Default::default(), + }), + None, + Some(Scheduled { + maybe_id: Some(b"test".to_vec()), + priority: 123, + origin: 2u32, + call: Call::Logger(logger::Call::log(69, 1000)), + maybe_periodic: Some((456u64, 10)), + _phantom: Default::default(), + }), + ]; + frame_support::migration::put_storage_value( + b"Scheduler", + b"Agenda", + &k, + old, + ); + } + + impl Into for u32 { + fn into(self) -> OriginCaller { + match self { + 3u32 => system::RawOrigin::Root.into(), + 2u32 => system::RawOrigin::None.into(), + _ => unreachable!("test make no use of it"), + } + } + } + + Scheduler::migrate_origin::(); + + assert_eq_uvec!(Agenda::::iter().collect::>(), vec![ + ( + 0, + vec![ + Some(ScheduledV2::<_, _, OriginCaller, u64> { + maybe_id: None, + priority: 10, + call: Call::Logger(logger::Call::log(96, 100)), + maybe_periodic: None, + origin: system::RawOrigin::Root.into(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV2 { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(logger::Call::log(69, 1000)), + maybe_periodic: Some((456u64, 10)), + origin: system::RawOrigin::None.into(), + _phantom: PhantomData::::default(), + }), + ]), + ( + 1, + vec![ + Some(ScheduledV2 { + maybe_id: None, + priority: 11, + call: Call::Logger(logger::Call::log(96, 100)), + maybe_periodic: None, + origin: system::RawOrigin::Root.into(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV2 { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(logger::Call::log(69, 1000)), + maybe_periodic: Some((456u64, 10)), + origin: system::RawOrigin::None.into(), + _phantom: PhantomData::::default(), + }), + ] + ), + ( + 2, + vec![ + Some(ScheduledV2 { + maybe_id: None, + priority: 12, + call: Call::Logger(logger::Call::log(96, 100)), + maybe_periodic: None, + origin: system::RawOrigin::Root.into(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV2 { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(logger::Call::log(69, 1000)), + maybe_periodic: Some((456u64, 10)), + origin: system::RawOrigin::None.into(), + _phantom: PhantomData::::default(), + }), + ] + ) + ]); + }); + } } diff --git 
a/frame/scored-pool/Cargo.toml b/frame/scored-pool/Cargo.toml index 1c25b8abfdf048809951c2489adad6b0c4d05a22..b36bade8e925a2763efaa77bc4bc597722cbc21d 100644 --- a/frame/scored-pool/Cargo.toml +++ b/frame/scored-pool/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-scored-pool" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for scored pools" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,15 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } [dev-dependencies] -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/scored-pool/README.md b/frame/scored-pool/README.md new file mode 100644 index 0000000000000000000000000000000000000000..948d5b497721b7f69c8ff2308d9c2d66cd1fbc60 --- /dev/null +++ b/frame/scored-pool/README.md @@ -0,0 +1,66 @@ +# Scored Pool Module + +The module maintains a scored membership pool. Each entity in the +pool can be attributed a `Score`. From this pool a set `Members` +is constructed. This set contains the `MemberCount` highest +scoring entities. Unscored entities are never part of `Members`. + +If an entity wants to be part of the pool a deposit is required. +The deposit is returned when the entity withdraws or when it +is removed by an entity with the appropriate authority. + +Every `Period` blocks the set of `Members` is refreshed from the +highest scoring members in the pool and, no matter if changes +occurred, `T::MembershipChanged::set_members_sorted` is invoked. +On first load `T::MembershipInitialized::initialize_members` is +invoked with the initial `Members` set. + +It is possible to withdraw candidacy/resign your membership at any +time. If an entity is currently a member, this results in removal +from the `Pool` and `Members`; the entity is immediately replaced +by the next highest scoring candidate in the pool, if available. 
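+
+The two hooks named above come from `frame_support::traits`. The sketch below only
+illustrates their shape: `MembershipLog` is a made-up receiver, the `AccountId`
+is assumed to be `u64`, and the bodies deliberately do nothing.
+
+```rust
+use frame_support::traits::{ChangeMembers, InitializeMembers};
+
+/// Illustrative receiver for the pool's membership hooks.
+pub struct MembershipLog;
+
+impl InitializeMembers<u64> for MembershipLog {
+    fn initialize_members(members: &[u64]) {
+        // Called once, on first load, with the initial `Members` set.
+        let _ = members;
+    }
+}
+
+impl ChangeMembers<u64> for MembershipLog {
+    fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) {
+        // Called every `Period` blocks (via `set_members_sorted`) with the
+        // refreshed, sorted member set, whether or not anything changed.
+        let _ = (incoming, outgoing, new);
+    }
+}
+```
+
+A type like this would then be supplied as the pool's `MembershipInitialized`
+and `MembershipChanged` configuration types.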
+ +- [`scored_pool::Trait`](https://docs.rs/pallet-scored-pool/latest/pallet_scored_pool/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-scored-pool/latest/pallet_scored_pool/enum.Call.html) +- [`Module`](https://docs.rs/pallet-scored-pool/latest/pallet_scored_pool/struct.Module.html) + +## Interface + +### Public Functions + +- `submit_candidacy` - Submit candidacy to become a member. Requires a deposit. +- `withdraw_candidacy` - Withdraw candidacy. Deposit is returned. +- `score` - Attribute a quantitative score to an entity. +- `kick` - Remove an entity from the pool and members. Deposit is returned. +- `change_member_count` - Changes the amount of candidates taken into `Members`. + +## Usage + +```rust +use frame_support::{decl_module, dispatch}; +use frame_system::ensure_signed; +use pallet_scored_pool::{self as scored_pool}; + +pub trait Trait: scored_pool::Trait {} + +decl_module! { + pub struct Module for enum Call where origin: T::Origin { + #[weight = 0] + pub fn candidate(origin) -> dispatch::DispatchResult { + let who = ensure_signed(origin)?; + + let _ = >::submit_candidacy( + T::Origin::from(Some(who.clone()).into()) + ); + Ok(()) + } + } +} + +``` + +## Dependencies + +This module depends on the [System module](https://docs.rs/frame-system/latest/frame_system/). + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index 4581f49bbbcb69986abcd3399e2f6ea171758536..59c0dc66cca609884dfb2b0f4d93e2bf764e3d33 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -70,7 +70,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -78,6 +78,7 @@ impl frame_system::Trait for Test { } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u64; type Event = (); type DustRemoval = (); diff --git a/frame/scored-pool/src/tests.rs b/frame/scored-pool/src/tests.rs index 9c0074ff6e6899f6efe7967c1ddeb28661a3b5fd..44b71bc00ba4756ce0b60bf9292ae54f8aac55d3 100644 --- a/frame/scored-pool/src/tests.rs +++ b/frame/scored-pool/src/tests.rs @@ -153,7 +153,7 @@ fn unscored_entities_must_not_be_used_for_filling_members() { // then // the `None` candidates should not have been filled in - assert_eq!(ScoredPool::members(), vec![]); + assert!(ScoredPool::members().is_empty()); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); }); } diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index b4150fb8e783649c938931f489c4d35da2478c8b..ea3a3d3cdf7fa4be23c5fee683cf603f14b40019 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-session" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME sessions pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,20 +15,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc5", default-features = 
false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-session = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/session" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../timestamp" } -sp-trie = { version = "2.0.0-rc5", optional = true, default-features = false, path = "../../primitives/trie" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-session = { version = "2.0.0", default-features = false, path = "../../primitives/session" } +sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } +sp-trie = { version = "2.0.0", optional = true, default-features = false, path = "../../primitives/trie" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-application-crypto = { version = "2.0.0-rc5", path = "../../primitives/application-crypto" } +sp-application-crypto = { version = "2.0.0", path = "../../primitives/application-crypto" } lazy_static = "1.4.0" [features] diff --git a/frame/session/README.md b/frame/session/README.md new file mode 100644 index 0000000000000000000000000000000000000000..60da8958f73d0177be221990ea93172e30e251bf --- /dev/null +++ b/frame/session/README.md @@ -0,0 +1,83 @@ +# Session Module + +The Session module allows validators to manage their session keys, provides a function for changing +the session length, and handles session rotation. + +- [`session::Trait`](https://docs.rs/pallet-session/latest/pallet_session/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-session/latest/pallet_session/enum.Call.html) +- [`Module`](https://docs.rs/pallet-session/latest/pallet_session/struct.Module.html) + +## Overview + +### Terminology + + +- **Session:** A session is a period of time that has a constant set of validators. Validators can only join +or exit the validator set at a session change. It is measured in block numbers. The block where a session is +ended is determined by the `ShouldEndSession` trait. When the session is ending, a new validator set +can be chosen by `OnSessionEnding` implementations. +- **Session key:** A session key is actually several keys kept together that provide the various signing +functions required by network authorities/validators in pursuit of their duties. +- **Validator ID:** Every account has an associated validator ID. For some simple staking systems, this +may just be the same as the account ID. 
For staking systems using a stash/controller model, +the validator ID would be the stash account ID of the controller. +- **Session key configuration process:** Session keys are set using `set_keys` for use not in +the next session, but the session after next. They are stored in `NextKeys`, a mapping between +the caller's `ValidatorId` and the session keys provided. `set_keys` allows users to set their +session key prior to being selected as validator. +It is a public call since it uses `ensure_signed`, which checks that the origin is a signed account. +As such, the account ID of the origin stored in `NextKeys` may not necessarily be associated with +a block author or a validator. The session keys of accounts are removed once their account balance is zero. +- **Session length:** This pallet does not assume anything about the length of each session. +Rather, it relies on an implementation of `ShouldEndSession` to dictate a new session's start. +This pallet provides the `PeriodicSessions` struct for simple periodic sessions. +- **Session rotation configuration:** Configure as either a 'normal' (rewardable session where rewards are +applied) or 'exceptional' (slashable) session rotation. +- **Session rotation process:** At the beginning of each block, the `on_initialize` function +queries the provided implementation of `ShouldEndSession`. If the session is to end the newly +activated validator IDs and session keys are taken from storage and passed to the +`SessionHandler`. The validator set supplied by `SessionManager::new_session` and the corresponding session +keys, which may have been registered via `set_keys` during the previous session, are written +to storage where they will wait one session before being passed to the `SessionHandler` +themselves. + +### Goals + +The Session pallet is designed to make the following possible: + +- Set session keys of the validator set for upcoming sessions. +- Control the length of sessions. +- Configure and switch between either normal or exceptional session rotations. + +## Interface + +### Dispatchable Functions + +- `set_keys` - Set a validator's session keys for upcoming sessions. + +### Public Functions + +- `rotate_session` - Change to the next session. Register the new authority set. Queue changes +for next session rotation. +- `disable_index` - Disable a validator by index. +- `disable` - Disable a validator by Validator ID + +## Usage + +### Example from the FRAME + +The [Staking pallet](https://docs.rs/pallet-staking/latest/pallet_staking/) uses the Session pallet to get the validator set. 
+ +```rust +use pallet_session as session; + +fn validators() -> Vec<::ValidatorId> { + >::validators() +} +``` + +## Related Modules + +- [Staking](https://docs.rs/pallet-staking/latest/pallet_staking/) + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml index e784ff16e8523545455b120ec551962e07d5fea1..dea05934cd8725e890e759abce190732ce567ce4 100644 --- a/frame/session/benchmarking/Cargo.toml +++ b/frame/session/benchmarking/Cargo.toml @@ -1,35 +1,36 @@ [package] name = "pallet-session-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME sessions pallet benchmarking" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } -sp-session = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/session" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../system" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../../benchmarking" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../support" } -pallet-staking = { version = "2.0.0-rc5", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } -pallet-session = { version = "2.0.0-rc5", default-features = false, path = "../../session" } +sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } +sp-session = { version = "2.0.0", default-features = false, path = "../../../primitives/session" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } +frame-system = { version = "2.0.0", default-features = false, path = "../../system" } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../benchmarking" } +frame-support = { version = "2.0.0", default-features = false, path = "../../support" } +pallet-staking = { version = "2.0.0", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } +pallet-session = { version = "2.0.0", default-features = false, path = "../../session" } rand = { version = "0.7.2", default-features = false } [dev-dependencies] serde = { version = "1.0.101" } codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -pallet-staking-reward-curve = { version = "2.0.0-rc5", path = "../../staking/reward-curve" } -sp-io ={ version = "2.0.0-rc5", path = "../../../primitives/io" } -pallet-timestamp = { version = "2.0.0-rc5", path = "../../timestamp" } -pallet-balances = { version = "2.0.0-rc5", path = "../../balances" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +pallet-staking-reward-curve = { version = "2.0.0", path = "../../staking/reward-curve" } +sp-io ={ version = "2.0.0", path = "../../../primitives/io" } +pallet-timestamp = { version = "2.0.0", path = "../../timestamp" } +pallet-balances = { version = "2.0.0", path = "../../balances" } [features] default = ["std"] diff --git a/frame/session/benchmarking/README.md 
b/frame/session/benchmarking/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d034a9ec73284fee91c5a2a8c397f5d4bc551a0e --- /dev/null +++ b/frame/session/benchmarking/README.md @@ -0,0 +1,3 @@ +Benchmarks for the Session Pallet. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs index cc471893356d54eee36ec998e0bf8b6c5ff2c57f..6fcdeaafc997ed70d604cf12bc70eae0b796fbac 100644 --- a/frame/session/benchmarking/src/lib.rs +++ b/frame/session/benchmarking/src/lib.rs @@ -28,14 +28,14 @@ use sp_std::vec; use frame_benchmarking::benchmarks; use frame_support::{ codec::Decode, - storage::StorageValue, + storage::{StorageValue, StorageMap}, traits::{KeyOwnerProofSystem, OnInitialize}, }; use frame_system::RawOrigin; use pallet_session::{historical::Module as Historical, Module as Session, *}; use pallet_staking::{ benchmarking::create_validator_with_nominators, testing_utils::create_validators, - MAX_NOMINATIONS, + MAX_NOMINATIONS, RewardDestination, }; use sp_runtime::traits::{One, StaticLookup}; @@ -54,22 +54,34 @@ benchmarks! { _ { } set_keys { - let n in 1 .. MAX_NOMINATIONS as u32; - let v_stash = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32, false)?; + let n = MAX_NOMINATIONS as u32; + let v_stash = create_validator_with_nominators::( + n, + MAX_NOMINATIONS as u32, + false, + RewardDestination::Staked, + )?; let v_controller = pallet_staking::Module::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::default(); let proof: Vec = vec![0,1,2,3]; + // Whitelist controller account from further DB operations. + let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); + frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); }: _(RawOrigin::Signed(v_controller), keys, proof) purge_keys { - let n in 1 .. MAX_NOMINATIONS as u32; - let v_stash = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32, false)?; + let n = MAX_NOMINATIONS as u32; + let v_stash = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32, false, RewardDestination::Staked)?; let v_controller = pallet_staking::Module::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::default(); let proof: Vec = vec![0,1,2,3]; Session::::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?; + // Whitelist controller account from further DB operations. + let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); + frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); }: _(RawOrigin::Signed(v_controller)) + #[extra] check_membership_proof_current_session { let n in 2 .. MAX_VALIDATORS as u32; @@ -82,6 +94,7 @@ benchmarks! { assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); } + #[extra] check_membership_proof_historical_session { let n in 2 .. MAX_VALIDATORS as u32; diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index d4eac4247734f420655d724cd267c7603d3c297d..645ac14d88a441ee0a3ae4d8679ecc549fcae180 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -78,7 +78,7 @@ impl frame_system::Trait for Test { type AvailableBlockRatio = (); type MaximumBlockLength = (); type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = Balances; @@ -88,6 +88,7 @@ parameter_types! 
{ pub const ExistentialDeposit: Balance = 10; } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = Balance; type Event = (); type DustRemoval = (); diff --git a/frame/session/src/default_weights.rs b/frame/session/src/default_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..f3082981c78bf1bbff7e6ed08aecf9ded81af982 --- /dev/null +++ b/frame/session/src/default_weights.rs @@ -0,0 +1,36 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn set_keys() -> Weight { + (88_411_000 as Weight) + .saturating_add(DbWeight::get().reads(6 as Weight)) + .saturating_add(DbWeight::get().writes(5 as Weight)) + } + fn purge_keys() -> Weight { + (51_843_000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(5 as Weight)) + } +} diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 2c1cba7137dccc63d44f33af1cd3e4ca314d3194..1d81f38bdf87b38e97650fda18af1ef77f40b735 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -123,6 +123,8 @@ mod tests; #[cfg(feature = "historical")] pub mod historical; +mod default_weights; + /// Decides whether the session should be ended. pub trait ShouldEndSession { /// Return `true` if the session should be ended. @@ -352,13 +354,8 @@ impl ValidatorRegistration for Module { } pub trait WeightInfo { - fn set_keys(n: u32, ) -> Weight; - fn purge_keys(n: u32, ) -> Weight; -} - -impl WeightInfo for () { - fn set_keys(_n: u32, ) -> Weight { 1_000_000_000 } - fn purge_keys(_n: u32, ) -> Weight { 1_000_000_000 } + fn set_keys() -> Weight; + fn purge_keys() -> Weight; } pub trait Trait: frame_system::Trait { @@ -484,7 +481,7 @@ decl_storage! { decl_event!( pub enum Event { - /// New session has happened. Note that the argument is the [session_index], not the block + /// New session has happened. Note that the argument is the \[session_index\], not the block /// number as the type might suggest. NewSession(SessionIndex), } @@ -524,9 +521,7 @@ decl_module! { /// - DbReads per key id: `KeyOwner` /// - DbWrites per key id: `KeyOwner` /// # - #[weight = 200_000_000 - + T::DbWeight::get().reads(2 + T::Keys::key_ids().len() as Weight) - + T::DbWeight::get().writes(1 + T::Keys::key_ids().len() as Weight)] + #[weight = T::WeightInfo::set_keys()] pub fn set_keys(origin, keys: T::Keys, proof: Vec) -> dispatch::DispatchResult { let who = ensure_signed(origin)?; @@ -549,8 +544,7 @@ decl_module! 
{ /// - DbWrites: `NextKeys`, `origin account` /// - DbWrites per key id: `KeyOwnder` /// # - #[weight = 120_000_000 - + T::DbWeight::get().reads_writes(2, 1 + T::Keys::key_ids().len() as Weight)] + #[weight = T::WeightInfo::purge_keys()] pub fn purge_keys(origin) { let who = ensure_signed(origin)?; Self::do_purge_keys(&who)?; diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index bd94264b155602e697f664ddf58a1d7ae1768299..1d787ac53b43854dee6f6ca34ace0df1172737c4 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -193,7 +193,7 @@ impl frame_system::Trait for Test { type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); diff --git a/frame/society/Cargo.toml b/frame/society/Cargo.toml index 229191c3ccb8013edf2813e2c4e2c4a508c3480e..2f3f3adabc2c2d8de8540ed1d8884e741091dfd7 100644 --- a/frame/society/Cargo.toml +++ b/frame/society/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-society" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME society pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,16 +15,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } rand_chacha = { version = "0.2", default-features = false } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-io ={ version = "2.0.0-rc5", path = "../../primitives/io" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-io ={ version = "2.0.0", path = "../../primitives/io" } +pallet-balances = { version = "2.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/society/README.md b/frame/society/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b4e1fbaf22cbac1ed5fadd9a55a07864fb8274a9 --- /dev/null +++ b/frame/society/README.md @@ -0,0 +1,228 @@ +# Society Module + +- [`society::Trait`](https://docs.rs/pallet-society/latest/pallet_society/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-society/latest/pallet_society/enum.Call.html) + +## Overview + +The Society module is an economic game which incentivizes users to participate +and maintain a membership society. 
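+
+Several of the mechanics described below (skeptic selection, choosing the Head,
+and picking the deciding vote) draw on the pallet's configured `Randomness`
+source. The sketch below shows a deterministic source of the same shape,
+suitable for tests only; `FixedRandomness` and the hashing scheme are
+illustrative assumptions, not part of this pallet.
+
+```rust
+use frame_support::traits::Randomness;
+use sp_core::H256;
+use sp_runtime::traits::{BlakeTwo256, Hash};
+
+/// Deterministic "randomness" for tests: hash the subject with a fixed tag.
+pub struct FixedRandomness;
+
+impl Randomness<H256> for FixedRandomness {
+    fn random(subject: &[u8]) -> H256 {
+        BlakeTwo256::hash_of(&(b"fixed-seed".to_vec(), subject.to_vec()))
+    }
+}
+```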
+ +### User Types + +At any point, a user in the society can be one of a: +* Bidder - A user who has submitted intention of joining the society. +* Candidate - A user who will be voted on to join the society. +* Suspended Candidate - A user who failed to win a vote. +* Member - A user who is a member of the society. +* Suspended Member - A member of the society who has accumulated too many strikes +or failed their membership challenge. + +Of the non-suspended members, there is always a: +* Head - A member who is exempt from suspension. +* Defender - A member whose membership is under question and voted on again. + +Of the non-suspended members of the society, a random set of them are chosen as +"skeptics". The mechanics of skeptics is explained in the +[member phase](#member-phase) below. + +### Mechanics + +#### Rewards + +Members are incentivized to participate in the society through rewards paid +by the Society treasury. These payments have a maturity period that the user +must wait before they are able to access the funds. + +#### Punishments + +Members can be punished by slashing the reward payouts that have not been +collected. Additionally, members can accumulate "strikes", and when they +reach a max strike limit, they become suspended. + +#### Skeptics + +During the voting period, a random set of members are selected as "skeptics". +These skeptics are expected to vote on the current candidates. If they do not vote, +their skeptic status is treated as a rejection vote, the member is deemed +"lazy", and are given a strike per missing vote. + +#### Membership Challenges + +Every challenge rotation period, an existing member will be randomly selected +to defend their membership into society. Then, other members can vote whether +this defender should stay in society. A simple majority wins vote will determine +the outcome of the user. Ties are treated as a failure of the challenge, but +assuming no one else votes, the defender always get a free vote on their +own challenge keeping them in the society. The Head member is exempt from the +negative outcome of a membership challenge. + +#### Society Treasury + +The membership society is independently funded by a treasury managed by this +module. Some subset of this treasury is placed in a Society Pot, which is used +to determine the number of accepted bids. + +#### Rate of Growth + +The membership society can grow at a rate of 10 accepted candidates per rotation period up +to the max membership threshold. Once this threshold is met, candidate selections +are stalled until there is space for new members to join. This can be resolved by +voting out existing members through the random challenges or by using governance +to increase the maximum membership count. + +### User Life Cycle + +A user can go through the following phases: + +```rust + +-------> User <----------+ + | + | + | | | ++----------------------------------------------+ +| | | | | +| | v | | +| | Bidder <-----------+ | +| | + | | +| | | + | +| | v Suspended | +| | Candidate +----> Candidate | +| | + + | +| | | | | +| + | | | +| Suspended +------>| | | +| Member | | | +| ^ | | | +| | v | | +| +-------+ Member <----------+ | +| | +| | ++------------------Society---------------------+ +``` + +#### Initialization + +The society is initialized with a single member who is automatically chosen as the Head. + +#### Bid Phase + +New users must have a bid to join the society. + +A user can make a bid by reserving a deposit. 
Alternatively, an already existing member +can create a bid on a user's behalf by "vouching" for them. + +A bid includes reward information that the user would like to receive for joining +the society. A vouching bid can additionally request some portion of that reward as a tip +to the voucher for vouching for the prospective candidate. + +Every rotation period, Bids are ordered by reward amount, and the module +selects as many bids the Society Pot can support for that period. + +These selected bids become candidates and move on to the Candidate phase. +Bids that were not selected stay in the bidder pool until they are selected or +a user chooses to "unbid". + +#### Candidate Phase + +Once a bidder becomes a candidate, members vote whether to approve or reject +that candidate into society. This voting process also happens during a rotation period. + +The approval and rejection criteria for candidates are not set on chain, +and may change for different societies. + +At the end of the rotation period, we collect the votes for a candidate +and randomly select a vote as the final outcome. + +```rust + [ a-accept, r-reject, s-skeptic ] ++----------------------------------+ +| | +| Member |0|1|2|3|4|5|6|7|8|9| | +| ----------------------------- | +| Vote |a|a|a|r|s|r|a|a|s|a| | +| ----------------------------- | +| Selected | | | |x| | | | | | | | +| | ++----------------------------------+ + +Result: Rejected +``` + +Each member that voted opposite to this randomly selected vote is punished by +slashing their unclaimed payouts and increasing the number of strikes they have. + +These slashed funds are given to a random user who voted the same as the +selected vote as a reward for participating in the vote. + +If the candidate wins the vote, they receive their bid reward as a future payout. +If the bid was placed by a voucher, they will receive their portion of the reward, +before the rest is paid to the winning candidate. + +One winning candidate is selected as the Head of the members. This is randomly +chosen, weighted by the number of approvals the winning candidates accumulated. + +If the candidate loses the vote, they are suspended and it is up to the Suspension +Judgement origin to determine if the candidate should go through the bidding process +again, should be accepted into the membership society, or rejected and their deposit +slashed. + +#### Member Phase + +Once a candidate becomes a member, their role is to participate in society. + +Regular participation involves voting on candidates who want to join the membership +society, and by voting in the right way, a member will accumulate future payouts. +When a payout matures, members are able to claim those payouts. + +Members can also vouch for users to join the society, and request a "tip" from +the fees the new member would collect by joining the society. This vouching +process is useful in situations where a user may not have enough balance to +satisfy the bid deposit. A member can only vouch one user at a time. + +During rotation periods, a random group of members are selected as "skeptics". +These skeptics are expected to vote on the current candidates. If they do not vote, +their skeptic status is treated as a rejection vote, the member is deemed +"lazy", and are given a strike per missing vote. + +There is a challenge period in parallel to the rotation period. During a challenge period, +a random member is selected to defend their membership to the society. 
Other members +make a traditional majority-wins vote to determine if the member should stay in the society. +Ties are treated as a failure of the challenge. + +If a member accumulates too many strikes or fails their membership challenge, +they will become suspended. While a member is suspended, they are unable to +claim matured payouts. It is up to the Suspension Judgement origin to determine +if the member should re-enter society or be removed from society with all their +future payouts slashed. + +## Interface + +### Dispatchable Functions + +#### For General Users + +* `bid` - A user can make a bid to join the membership society by reserving a deposit. +* `unbid` - A user can withdraw their bid for entry, the deposit is returned. + +#### For Members + +* `vouch` - A member can place a bid on behalf of a user to join the membership society. +* `unvouch` - A member can revoke their vouch for a user. +* `vote` - A member can vote to approve or reject a candidate's request to join the society. +* `defender_vote` - A member can vote to approve or reject a defender's continued membership +to the society. +* `payout` - A member can claim their first matured payment. +* `unfound` - Allow the founder to unfound the society when they are the only member. + +#### For Super Users + +* `found` - The founder origin can initiate this society. Useful for bootstrapping the Society +pallet on an already running chain. +* `judge_suspended_member` - The suspension judgement origin is able to make +judgement on a suspended member. +* `judge_suspended_candidate` - The suspension judgement origin is able to +make judgement on a suspended candidate. +* `set_max_membership` - The ROOT origin can update the maximum member count for the society. +The max membership count must be greater than 1. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 69ba46c83295563ca71b4301090ba2226dd8ecc7..cbfe5a00de240ea8ce8217dbbfb26ac8d1d1b932 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -1111,40 +1111,40 @@ decl_event! { AccountId = ::AccountId, Balance = BalanceOf { - /// The society is founded by the given identity. [founder] + /// The society is founded by the given identity. \[founder\] Founded(AccountId), /// A membership bid just happened. The given account is the candidate's ID and their offer - /// is the second. [candidate_id, offer] + /// is the second. \[candidate_id, offer\] Bid(AccountId, Balance), /// A membership bid just happened by vouching. The given account is the candidate's ID and - /// their offer is the second. The vouching party is the third. [candidate_id, offer, vouching] + /// their offer is the second. The vouching party is the third. \[candidate_id, offer, vouching\] Vouch(AccountId, Balance, AccountId), - /// A [candidate] was dropped (due to an excess of bids in the system). + /// A \[candidate\] was dropped (due to an excess of bids in the system). AutoUnbid(AccountId), - /// A [candidate] was dropped (by their request). + /// A \[candidate\] was dropped (by their request). Unbid(AccountId), - /// A [candidate] was dropped (by request of who vouched for them). + /// A \[candidate\] was dropped (by request of who vouched for them). Unvouch(AccountId), /// A group of candidates have been inducted. The batch's primary is the first value, the - /// batch in full is the second. [primary, candidates] + /// batch in full is the second. 
\[primary, candidates\] Inducted(AccountId, Vec), - /// A suspended member has been judged. [who, judged] + /// A suspended member has been judged. \[who, judged\] SuspendedMemberJudgement(AccountId, bool), - /// A [candidate] has been suspended + /// A \[candidate\] has been suspended CandidateSuspended(AccountId), - /// A [member] has been suspended + /// A \[member\] has been suspended MemberSuspended(AccountId), - /// A [member] has been challenged + /// A \[member\] has been challenged Challenged(AccountId), - /// A vote has been placed [candidate, voter, vote] + /// A vote has been placed \[candidate, voter, vote\] Vote(AccountId, AccountId, bool), - /// A vote has been placed for a defending member [voter, vote] + /// A vote has been placed for a defending member \[voter, vote\] DefenderVote(AccountId, bool), - /// A new [max] member count has been set + /// A new \[max\] member count has been set NewMaxMembers(u32), - /// Society is unfounded. [founder] + /// Society is unfounded. \[founder\] Unfounded(AccountId), - /// Some funds were deposited into the society account. [value] + /// Some funds were deposited into the society account. \[value\] Deposit(Balance), } } diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index f29dbc8cb17ad5aee8821f8888d89f85c2eee910..212bcfd404ff152dc47be29bb063e891f91910bd 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -20,7 +20,8 @@ use super::*; use frame_support::{ - impl_outer_origin, parameter_types, ord_parameter_types, traits::{OnInitialize, OnFinalize} + impl_outer_origin, parameter_types, ord_parameter_types, + traits::{OnInitialize, OnFinalize, TestRandomness}, }; use sp_core::H256; use sp_runtime::{ @@ -80,7 +81,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type OnNewAccount = (); type OnKilledAccount = (); type AccountData = pallet_balances::AccountData; @@ -88,6 +89,7 @@ impl frame_system::Trait for Test { } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u64; type Event = (); type DustRemoval = (); @@ -99,7 +101,7 @@ impl pallet_balances::Trait for Test { impl Trait for Test { type Event = (); type Currency = pallet_balances::Module; - type Randomness = (); + type Randomness = TestRandomness; type CandidateDeposit = CandidateDeposit; type WrongSideDeduction = WrongSideDeduction; type MaxStrikes = MaxStrikes; diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index f0bc0c0ac7ca65b9909647ab5866f3b0ce10d279..88b8c1270a4e1e615a3d0b33406dfcfed861ac93 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-staking" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet staking" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,32 +16,32 @@ targets = ["x86_64-unknown-linux-gnu"] static_assertions = "1.1.0" serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-npos-elections = { version = "2.0.0-rc5", default-features = false, 
path = "../../primitives/npos-elections" } -sp-io ={ version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-session = { version = "2.0.0-rc5", default-features = false, features = ["historical"], path = "../session" } -pallet-authorship = { version = "2.0.0-rc5", default-features = false, path = "../authorship" } -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-npos-elections = { version = "2.0.0", default-features = false, path = "../../primitives/npos-elections" } +sp-io ={ version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +pallet-session = { version = "2.0.0", default-features = false, features = ["historical"], path = "../session" } +pallet-authorship = { version = "2.0.0", default-features = false, path = "../authorship" } +sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } # Optional imports for benchmarking -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } rand_chacha = { version = "0.2", default-features = false, optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-storage = { version = "2.0.0-rc5", path = "../../primitives/storage" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -pallet-timestamp = { version = "2.0.0-rc5", path = "../timestamp" } -pallet-staking-reward-curve = { version = "2.0.0-rc5", path = "../staking/reward-curve" } -substrate-test-utils = { version = "2.0.0-rc5", path = "../../test-utils" } -frame-benchmarking = { version = "2.0.0-rc5", path = "../benchmarking" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-storage = { version = "2.0.0", path = "../../primitives/storage" } +sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } +pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-timestamp = { version = "2.0.0", path = "../timestamp" } +pallet-staking-reward-curve = { version = "2.0.0", path = "../staking/reward-curve" } +substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } +frame-benchmarking = { version = "2.0.0", path = "../benchmarking" } rand_chacha = { version = "0.2" } parking_lot = "0.10.2" -env_logger = "0.7.1" hex = "0.4" [features] diff --git a/frame/staking/README.md b/frame/staking/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b7b2141e58a5bae7963013407b36ce19faab70fa --- 
/dev/null +++ b/frame/staking/README.md @@ -0,0 +1,249 @@ +# Staking Module + +The Staking module is used to manage funds at stake by network maintainers. + +- [`staking::Trait`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html) +- [`Module`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Module.html) + +## Overview + +The Staking module is the means by which a set of network maintainers (known as _authorities_ in +some contexts and _validators_ in others) are chosen based upon those who voluntarily place +funds under deposit. Under deposit, those funds are rewarded under normal operation but are held +at pain of _slash_ (expropriation) should the staked maintainer be found not to be discharging +its duties properly. + +### Terminology + + +- Staking: The process of locking up funds for some time, placing them at risk of slashing + (loss) in order to become a rewarded maintainer of the network. +- Validating: The process of running a node to actively maintain the network, either by + producing blocks or guaranteeing finality of the chain. +- Nominating: The process of placing staked funds behind one or more validators in order to + share in any reward, and punishment, they take. +- Stash account: The account holding an owner's funds used for staking. +- Controller account: The account that controls an owner's funds for staking. +- Era: A (whole) number of sessions, which is the period that the validator set (and each + validator's active nominator set) is recalculated and where rewards are paid out. +- Slash: The punishment of a staker by reducing its funds. + +### Goals + + +The staking system in Substrate NPoS is designed to make the following possible: + +- Stake funds that are controlled by a cold wallet. +- Withdraw some, or deposit more, funds without interrupting the role of an entity. +- Switch between roles (nominator, validator, idle) with minimal overhead. + +### Scenarios + +#### Staking + +Almost any interaction with the Staking module requires a process of _**bonding**_ (also known +as being a _staker_). To become *bonded*, a fund-holding account known as the _stash account_, +which holds some or all of the funds that become frozen in place as part of the staking process, +is paired with an active **controller** account, which issues instructions on how they shall be +used. + +An account pair can become bonded using the [`bond`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.bond) call. + +Stash accounts can change their associated controller using the +[`set_controller`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.set_controller) call. + +There are three possible roles that any staked account pair can be in: `Validator`, `Nominator` +and `Idle` (defined in [`StakerStatus`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.StakerStatus.html)). There are three +corresponding instructions to change between roles, namely: +[`validate`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.validate), +[`nominate`](./enum.Call.html#variant.nominate), and [`chill`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.chill). + +#### Validating + +A **validator** takes the role of either validating blocks or ensuring their finality, +maintaining the veracity of the network. A validator should avoid both any sort of malicious +misbehavior and going offline. 
Bonded accounts that state interest in being a validator do NOT +get immediately chosen as a validator. Instead, they are declared as _candidates_ and they +_might_ get elected at the _next era_ as validators. The result of the election is determined +by nominators and their votes. + +An account can become a validator candidate via the +[`validate`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.validate) call. + +#### Nomination + +A **nominator** does not take any _direct_ role in maintaining the network; instead, it votes on +a set of validators to be elected. Once interest in nomination is stated by an account, it +takes effect at the next election round. The funds in the nominator's stash account indicate the +_weight_ of its vote. Both the rewards and any punishment that a validator earns are shared +between the validator and its nominators. This rule incentivizes the nominators to NOT vote for +misbehaving or offline validators as much as possible, simply because the nominators will also +lose funds if they vote poorly. + +An account can become a nominator via the [`nominate`](enum.Call.html#variant.nominate) call. + +#### Rewards and Slash + +The **reward and slashing** procedure is the core of the Staking module, attempting to _embrace +valid behavior_ while _punishing any misbehavior or lack of availability_. + +Rewards must be claimed for each era, before it becomes older than `$HISTORY_DEPTH`, using the +`payout_stakers` call. Any account can call `payout_stakers`, which pays the reward to the +validator as well as its nominators. Only the [`Trait::MaxNominatorRewardedPerValidator`] +biggest stakers can claim their reward. This is to limit the I/O cost of mutating storage for each +nominator's account. + +Slashing can occur at any point in time, once misbehavior is reported. Once slashing is +determined, a value is deducted from the balance of the validator and all the nominators who +voted for this validator (values are deducted from the _stash_ account of the slashed entity). + +Slashing logic is further described in the documentation of the `slashing` module. + +Similar to slashing, rewards are also shared among a validator and its associated nominators. +Yet, the reward funds are not always transferred to the stash account and can be configured. See +[Reward Calculation](#reward-calculation) for more details. + +#### Chilling + +Finally, any of the roles above can choose to step back temporarily and just chill for a while. +This means that if they are a nominator, they will no longer be considered a voter, and if +they are a validator, they will no longer be a candidate for the next election. + +An account can step back via the [`chill`](enum.Call.html#variant.chill) call. + +### Session managing + +The module implements the trait `SessionManager`, which is the only API to query the new validator +set and to allow these validators to be rewarded once their era has ended. + +## Interface + +### Dispatchable Functions + +The dispatchable functions of the Staking module enable the steps needed for entities to accept +and change their role, alongside some helper functions to get/set the metadata of the module. + +### Public Functions + +The Staking module contains many public storage items and (im)mutable functions. + +## Usage + +### Example: Rewarding a validator by id. + +```rust +use frame_support::{decl_module, dispatch}; +use frame_system::ensure_signed; +use pallet_staking::{self as staking}; + +pub trait Trait: staking::Trait {} + +decl_module!
{ + pub struct Module<T: Trait> for enum Call where origin: T::Origin { + /// Reward a validator. + #[weight = 0] + pub fn reward_myself(origin) -> dispatch::DispatchResult { + let reported = ensure_signed(origin)?; + <staking::Module<T>>::reward_by_ids(vec![(reported, 10)]); + Ok(()) + } + } +} +``` + +## Implementation Details + +### Era payout + +The era payout is computed using the yearly inflation curve defined at +[`T::RewardCurve`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Trait.html#associatedtype.RewardCurve) as follows: + +```nocompile +staker_payout = yearly_inflation(npos_token_staked / total_tokens) * total_tokens / era_per_year +``` +This payout is used to reward stakers as defined in the next section. + +```nocompile +remaining_payout = max_yearly_inflation * total_tokens / era_per_year - staker_payout +``` +The remaining reward is sent to the configurable end-point +[`T::RewardRemainder`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Trait.html#associatedtype.RewardRemainder). + +### Reward Calculation + +Validators and nominators are rewarded at the end of each era. The total reward of an era is +calculated using the era duration and the staking rate (the total amount of tokens staked by +nominators and validators, divided by the total token supply). It aims to incentivize toward a +defined staking rate. The full specification can be found +[here](https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model). + +The total reward is split among validators and their nominators depending on the number of points +they received during the era. Points are added to a validator using +[`reward_by_ids`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.reward_by_ids) or +[`reward_by_indices`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.reward_by_indices). + +[`Module`](./struct.Module.html) implements +[`pallet_authorship::EventHandler`](https://docs.rs/pallet-authorship/latest/pallet_authorship/trait.EventHandler.html) to add reward +points to the block producer and the producers of referenced uncles. + +The validator and its nominators split their reward as follows (a simplified numeric sketch of +this split appears at the end of this README): + +The validator can declare an amount, named +[`commission`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.ValidatorPrefs.html#structfield.commission), that does not get shared +with the nominators at each reward payout through its +[`ValidatorPrefs`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.ValidatorPrefs.html). This value gets deducted from the total reward +that is paid to the validator and its nominators. The remaining portion is split among the +validator and all of the nominators that nominated the validator, proportional to the value +staked behind this validator (_i.e._ dividing the +[`own`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Exposure.html#structfield.own) or +[`others`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Exposure.html#structfield.others) by +[`total`](./struct.Exposure.html#structfield.total) in [`Exposure`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Exposure.html)). + +All entities who receive a reward have the option to choose their reward destination through the +[`Payee`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Payee.html) storage item (see +[`set_payee`](enum.Call.html#variant.set_payee)), to be one of the following: + +- Controller account, (obviously) not increasing the staked value.
+- Stash account, not increasing the staked value. +- Stash account, also increasing the staked value. + +### Additional Fund Management Operations + +Any funds already placed into stash can be the target of the following operations: + +The controller account can free a portion (or all) of the funds using the +[`unbond`](enum.Call.html#variant.unbond) call. Note that the funds are not immediately +accessible. Instead, a duration denoted by [`BondingDuration`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.BondingDuration.html) +(in number of eras) must pass until the funds can actually be removed. Once the +`BondingDuration` is over, the [`withdraw_unbonded`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.withdraw_unbonded) +call can be used to actually withdraw the funds. + +Note that there is a limitation to the number of fund-chunks that can be scheduled to be +unlocked in the future via [`unbond`](enum.Call.html#variant.unbond). If this maximum +(`MAX_UNLOCKING_CHUNKS`) is reached, the bonded account _must_ first wait for a successful +call to `withdraw_unbonded` to remove some of the chunks. + +### Election Algorithm + +The current election algorithm is implemented based on Phragmén. The reference implementation +can be found [here](https://github.com/w3f/consensus/tree/master/NPoS). + +The election algorithm, aside from electing the validators with the most stake value and votes, +tries to divide the nominator votes among candidates in an equal manner. To further assure this, +an optional post-processing can be applied that iteratively normalizes the nominator staked +values until the total difference among votes of a particular nominator is less than a +threshold. + +## GenesisConfig + +The Staking module depends on the [`GenesisConfig`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.GenesisConfig.html). The +`GenesisConfig` is optional and allows setting some initial stakers. + +## Related Modules + +- [Balances](https://docs.rs/pallet-balances/latest/pallet_balances/): Used to manage values at stake. +- [Session](https://docs.rs/pallet-session/latest/pallet_session/): Used to manage sessions. Also, a list of new + validators is stored in the Session module's `Validators` at the end of each era.
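The proportional reward split described under Reward Calculation above can be illustrated with a small, self-contained sketch. This is not the pallet's code path (`payout_stakers` works with `Exposure`, `EraRewardPoints` and `Perbill` internally); it is a simplified model with plain integer balances, a per-mill commission, and hypothetical stake figures chosen only for illustration.

```rust
/// Split an era reward between a validator and its nominators, following the scheme described
/// above: the declared commission is taken off the top and the remainder is shared pro rata by
/// stake (the validator's own stake counts like any other). Simplified model, not pallet code.
fn split_reward(
    total_reward: u128,
    commission_per_mill: u128, // e.g. 50 = 5%; the real pallet uses `Perbill`
    own_stake: u128,
    nominator_stakes: &[(&str, u128)],
) -> (u128, Vec<(String, u128)>) {
    let commission = total_reward * commission_per_mill / 1_000;
    let shared = total_reward - commission;
    let total_stake: u128 = own_stake + nominator_stakes.iter().map(|(_, s)| *s).sum::<u128>();

    // The validator keeps its commission plus the pro-rata share of its own stake.
    let validator_part = commission + shared * own_stake / total_stake;
    // Each nominator receives the shared portion weighted by its stake behind this validator.
    let nominator_parts = nominator_stakes
        .iter()
        .map(|(who, stake)| (who.to_string(), shared * stake / total_stake))
        .collect();
    (validator_part, nominator_parts)
}

fn main() {
    // Hypothetical numbers: a 1_000-unit era reward, 5% commission,
    // validator self-stake of 100 and two nominators staking 300 and 600.
    let (validator, nominators) =
        split_reward(1_000, 50, 100, &[("nominator-a", 300), ("nominator-b", 600)]);
    println!("validator gets {}, nominators get {:?}", validator, nominators);
}
```

With these made-up figures the 1 000-unit reward splits into 145 for the validator (50 commission plus its pro-rata share on a self-stake of 100) and 285 and 570 for the two nominators.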
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/staking/fuzzer/Cargo.toml b/frame/staking/fuzzer/Cargo.toml index 832ac622a976a3c7e87fe7c079a7eda219f3b3c2..e1431aa54d4a7caad72246f78291396ced028705 100644 --- a/frame/staking/fuzzer/Cargo.toml +++ b/frame/staking/fuzzer/Cargo.toml @@ -15,19 +15,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] honggfuzz = "0.5" codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -pallet-staking = { version = "2.0.0-rc5", path = "..", features = ["runtime-benchmarks"] } -pallet-staking-reward-curve = { version = "2.0.0-rc5", path = "../reward-curve" } -pallet-session = { version = "2.0.0-rc5", path = "../../session" } -pallet-indices = { version = "2.0.0-rc5", path = "../../indices" } -pallet-balances = { version = "2.0.0-rc5", path = "../../balances" } -pallet-timestamp = { version = "2.0.0-rc5", path = "../../timestamp" } -frame-system = { version = "2.0.0-rc5", path = "../../system" } -frame-support = { version = "2.0.0-rc5", path = "../../support" } -sp-std = { version = "2.0.0-rc5", path = "../../../primitives/std" } -sp-io ={ version = "2.0.0-rc5", path = "../../../primitives/io" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-npos-elections = { version = "2.0.0-rc5", path = "../../../primitives/npos-elections" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } +pallet-staking = { version = "2.0.0", path = "..", features = ["runtime-benchmarks"] } +pallet-staking-reward-curve = { version = "2.0.0", path = "../reward-curve" } +pallet-session = { version = "2.0.0", path = "../../session" } +pallet-indices = { version = "2.0.0", path = "../../indices" } +pallet-balances = { version = "2.0.0", path = "../../balances" } +pallet-timestamp = { version = "2.0.0", path = "../../timestamp" } +frame-system = { version = "2.0.0", path = "../../system" } +frame-support = { version = "2.0.0", path = "../../support" } +sp-std = { version = "2.0.0", path = "../../../primitives/std" } +sp-io ={ version = "2.0.0", path = "../../../primitives/io" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-npos-elections = { version = "2.0.0", path = "../../../primitives/npos-elections" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } [[bin]] name = "submit_solution" diff --git a/frame/staking/fuzzer/src/mock.rs b/frame/staking/fuzzer/src/mock.rs index 1f5b29b56b6b60d58e4b9b0703011472b22cf666..aae044d75acd9538d9ce4bdc4c2d069ea9866693 100644 --- a/frame/staking/fuzzer/src/mock.rs +++ b/frame/staking/fuzzer/src/mock.rs @@ -77,7 +77,7 @@ impl frame_system::Trait for Test { type AvailableBlockRatio = (); type MaximumBlockLength = (); type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (Balances,); @@ -87,6 +87,7 @@ parameter_types! 
{ pub const ExistentialDeposit: Balance = 10; } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = Balance; type Event = (); type DustRemoval = (); diff --git a/frame/staking/fuzzer/src/submit_solution.rs b/frame/staking/fuzzer/src/submit_solution.rs index 6812a739c4929158d3b1d9986e5fbbe786b9c3f6..9158331726ab395b947a9c18fe80089456450bcc 100644 --- a/frame/staking/fuzzer/src/submit_solution.rs +++ b/frame/staking/fuzzer/src/submit_solution.rs @@ -166,7 +166,7 @@ fn main() { DispatchError::Module { index: 0, error: 16, - message: Some("PhragmenWeakSubmission"), + message: Some("OffchainElectionWeakSubmission"), }, ); }, diff --git a/frame/staking/reward-curve/Cargo.toml b/frame/staking/reward-curve/Cargo.toml index 25cbffda1de9304a326aaac672292799cfba51a2..19f7e51b8f6ce19b5b09d4770055d8c1a1f7a3e2 100644 --- a/frame/staking/reward-curve/Cargo.toml +++ b/frame/staking/reward-curve/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-staking-reward-curve" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -21,4 +21,4 @@ proc-macro2 = "1.0.6" proc-macro-crate = "0.1.4" [dev-dependencies] -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index aab92ef4ce5a1440efee721e011cbdd538d28d2f..afda58db4672fa1fbce46736186c54857ba9230e 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -23,12 +23,20 @@ use testing_utils::*; use sp_runtime::traits::One; use frame_system::RawOrigin; -pub use frame_benchmarking::{benchmarks, account}; +pub use frame_benchmarking::{benchmarks, account, whitelisted_caller}; const SEED: u32 = 0; const MAX_SPANS: u32 = 100; const MAX_VALIDATORS: u32 = 1000; const MAX_SLASHES: u32 = 1000; +macro_rules! do_whitelist { + ($acc:ident) => { + frame_benchmarking::benchmarking::add_to_whitelist( + frame_system::Account::::hashed_key_for(&$acc).into() + ); + } +} + // Add slashing spans to a user account. Not relevant for actual use, only to benchmark // read and write operations. fn add_slashing_spans(who: &T::AccountId, spans: u32) { @@ -51,11 +59,12 @@ pub fn create_validator_with_nominators( n: u32, upper_bound: u32, dead: bool, + destination: RewardDestination ) -> Result { let mut points_total = 0; let mut points_individual = Vec::new(); - let (v_stash, v_controller) = create_stash_controller::(0, 100)?; + let (v_stash, v_controller) = create_stash_controller::(0, 100, destination.clone())?; let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), }; @@ -68,9 +77,9 @@ pub fn create_validator_with_nominators( // Give the validator n nominators, but keep total users in the system the same. for i in 0 .. upper_bound { let (_n_stash, n_controller) = if !dead { - create_stash_controller::(u32::max_value() - i, 100)? + create_stash_controller::(u32::max_value() - i, 100, destination.clone())? } else { - create_stash_and_dead_controller::(u32::max_value() - i, 100)? + create_stash_and_dead_controller::(u32::max_value() - i, 100, destination.clone())? }; if i < n { Staking::::nominate(RawOrigin::Signed(n_controller.clone()).into(), vec![stash_lookup.clone()])?; @@ -100,19 +109,18 @@ pub fn create_validator_with_nominators( Ok(v_stash) } +const USER_SEED: u32 = 999666; + benchmarks! { - _{ - // User account seed - let u in 0 .. 
1000 => (); - } + _{} bond { - let u in ...; - let stash = create_funded_user::("stash", u, 100); - let controller = create_funded_user::("controller", u, 100); + let stash = create_funded_user::("stash", USER_SEED, 100); + let controller = create_funded_user::("controller", USER_SEED, 100); let controller_lookup: ::Source = T::Lookup::unlookup(controller.clone()); let reward_destination = RewardDestination::Staked; let amount = T::Currency::minimum_balance() * 10.into(); + do_whitelist!(stash); }: _(RawOrigin::Signed(stash.clone()), controller_lookup, amount, reward_destination) verify { assert!(Bonded::::contains_key(stash)); @@ -120,11 +128,11 @@ benchmarks! { } bond_extra { - let u in ...; - let (stash, controller) = create_stash_controller::(u, 100)?; + let (stash, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; let max_additional = T::Currency::minimum_balance() * 10.into(); let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; let original_bonded: BalanceOf = ledger.active; + do_whitelist!(stash); }: _(RawOrigin::Signed(stash), max_additional) verify { let ledger = Ledger::::get(&controller).ok_or("ledger not created after")?; @@ -133,11 +141,11 @@ benchmarks! { } unbond { - let u in ...; - let (_, controller) = create_stash_controller::(u, 100)?; + let (_, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; let amount = T::Currency::minimum_balance() * 10.into(); let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; let original_bonded: BalanceOf = ledger.active; + do_whitelist!(controller); }: _(RawOrigin::Signed(controller.clone()), amount) verify { let ledger = Ledger::::get(&controller).ok_or("ledger not created after")?; @@ -149,13 +157,14 @@ benchmarks! { withdraw_unbonded_update { // Slashing Spans let s in 0 .. MAX_SPANS; - let (stash, controller) = create_stash_controller::(0, 100)?; + let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; add_slashing_spans::(&stash, s); let amount = T::Currency::minimum_balance() * 5.into(); // Half of total Staking::::unbond(RawOrigin::Signed(controller.clone()).into(), amount)?; CurrentEra::put(EraIndex::max_value()); let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; let original_total: BalanceOf = ledger.total; + do_whitelist!(controller); }: withdraw_unbonded(RawOrigin::Signed(controller.clone()), s) verify { let ledger = Ledger::::get(&controller).ok_or("ledger not created after")?; @@ -167,22 +176,23 @@ benchmarks! { withdraw_unbonded_kill { // Slashing Spans let s in 0 .. 
MAX_SPANS; - let (stash, controller) = create_stash_controller::(0, 100)?; + let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; add_slashing_spans::(&stash, s); let amount = T::Currency::minimum_balance() * 10.into(); Staking::::unbond(RawOrigin::Signed(controller.clone()).into(), amount)?; CurrentEra::put(EraIndex::max_value()); let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; let original_total: BalanceOf = ledger.total; + do_whitelist!(controller); }: withdraw_unbonded(RawOrigin::Signed(controller.clone()), s) verify { assert!(!Ledger::::contains_key(controller)); } validate { - let u in ...; - let (stash, controller) = create_stash_controller::(u, 100)?; + let (stash, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; let prefs = ValidatorPrefs::default(); + do_whitelist!(controller); }: _(RawOrigin::Signed(controller), prefs) verify { assert!(Validators::::contains_key(stash)); @@ -191,51 +201,52 @@ benchmarks! { // Worst case scenario, MAX_NOMINATIONS nominate { let n in 1 .. MAX_NOMINATIONS as u32; - let (stash, controller) = create_stash_controller::(n + 1, 100)?; + let (stash, controller) = create_stash_controller::(n + 1, 100, Default::default())?; let validators = create_validators::(n, 100)?; + do_whitelist!(controller); }: _(RawOrigin::Signed(controller), validators) verify { assert!(Nominators::::contains_key(stash)); } chill { - let u in ...; - let (_, controller) = create_stash_controller::(u, 100)?; + let (_, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; + do_whitelist!(controller); }: _(RawOrigin::Signed(controller)) set_payee { - let u in ...; - let (stash, controller) = create_stash_controller::(u, 100)?; + let (stash, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; assert_eq!(Payee::::get(&stash), RewardDestination::Staked); + do_whitelist!(controller); }: _(RawOrigin::Signed(controller), RewardDestination::Controller) verify { assert_eq!(Payee::::get(&stash), RewardDestination::Controller); } set_controller { - let u in ...; - let (stash, _) = create_stash_controller::(u, 100)?; - let new_controller = create_funded_user::("new_controller", u, 100); + let (stash, _) = create_stash_controller::(USER_SEED, 100, Default::default())?; + let new_controller = create_funded_user::("new_controller", USER_SEED, 100); let new_controller_lookup = T::Lookup::unlookup(new_controller.clone()); + do_whitelist!(stash); }: _(RawOrigin::Signed(stash), new_controller_lookup) verify { assert!(Ledger::::contains_key(&new_controller)); } set_validator_count { - let c in 0 .. MAX_VALIDATORS; - }: _(RawOrigin::Root, c) + let validator_count = MAX_VALIDATORS; + }: _(RawOrigin::Root, validator_count) verify { - assert_eq!(ValidatorCount::get(), c); + assert_eq!(ValidatorCount::get(), validator_count); } - force_no_eras { let i in 0 .. 1; }: _(RawOrigin::Root) + force_no_eras {}: _(RawOrigin::Root) verify { assert_eq!(ForceEra::get(), Forcing::ForceNone); } - force_new_era {let i in 0 .. 1; }: _(RawOrigin::Root) + force_new_era {}: _(RawOrigin::Root) verify { assert_eq!(ForceEra::get(), Forcing::ForceNew); } - force_new_era_always { let i in 0 .. 1; }: _(RawOrigin::Root) + force_new_era_always {}: _(RawOrigin::Root) verify { assert_eq!(ForceEra::get(), Forcing::ForceAlways); } // Worst case scenario, the list of invulnerables is very long. @@ -253,7 +264,7 @@ benchmarks! { force_unstake { // Slashing Spans let s in 0 .. 
MAX_SPANS; - let (stash, controller) = create_stash_controller::(0, 100)?; + let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; add_slashing_spans::(&stash, s); }: _(RawOrigin::Root, stash, s) verify { @@ -275,37 +286,60 @@ benchmarks! { assert_eq!(UnappliedSlashes::::get(&era).len(), (MAX_SLASHES - s) as usize); } - payout_stakers { + payout_stakers_dead_controller { let n in 1 .. T::MaxNominatorRewardedPerValidator::get() as u32; - let validator = create_validator_with_nominators::(n, T::MaxNominatorRewardedPerValidator::get() as u32, true)?; + let validator = create_validator_with_nominators::( + n, + T::MaxNominatorRewardedPerValidator::get() as u32, + true, + RewardDestination::Controller, + )?; let current_era = CurrentEra::get().unwrap(); - let caller = account("caller", 0, SEED); - let balance_before = T::Currency::free_balance(&validator); - }: _(RawOrigin::Signed(caller), validator.clone(), current_era) + // set the commission for this particular era as well. + >::insert(current_era, validator.clone(), >::validators(&validator)); + + let caller = whitelisted_caller(); + let validator_controller = >::get(&validator).unwrap(); + let balance_before = T::Currency::free_balance(&validator_controller); + }: payout_stakers(RawOrigin::Signed(caller), validator.clone(), current_era) verify { - // Validator has been paid! - let balance_after = T::Currency::free_balance(&validator); - assert!(balance_before < balance_after); + let balance_after = T::Currency::free_balance(&validator_controller); + assert!( + balance_before < balance_after, + "Balance of controller {:?} should have increased after payout.", + validator, + ); } - payout_stakers_alive_controller { + payout_stakers_alive_staked { let n in 1 .. T::MaxNominatorRewardedPerValidator::get() as u32; - let validator = create_validator_with_nominators::(n, T::MaxNominatorRewardedPerValidator::get() as u32, false)?; + let validator = create_validator_with_nominators::( + n, + T::MaxNominatorRewardedPerValidator::get() as u32, + false, + RewardDestination::Staked, + )?; let current_era = CurrentEra::get().unwrap(); - let caller = account("caller", 0, SEED); + // set the commission for this particular era as well. + >::insert(current_era, validator.clone(), >::validators(&validator)); + + let caller = whitelisted_caller(); let balance_before = T::Currency::free_balance(&validator); }: payout_stakers(RawOrigin::Signed(caller), validator.clone(), current_era) verify { - // Validator has been paid! let balance_after = T::Currency::free_balance(&validator); - assert!(balance_before < balance_after); + assert!( + balance_before < balance_after, + "Balance of stash {:?} should have increased after payout.", + validator, + ); } rebond { let l in 1 .. MAX_UNLOCKING_CHUNKS as u32; - let (_, controller) = create_stash_controller::(u, 100)?; + let (_, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); let unlock_chunk = UnlockChunk::> { value: 1.into(), @@ -316,6 +350,7 @@ benchmarks! { } Ledger::::insert(controller.clone(), staking_ledger.clone()); let original_bonded: BalanceOf = staking_ledger.active; + do_whitelist!(controller); }: _(RawOrigin::Signed(controller.clone()), (l + 100).into()) verify { let ledger = Ledger::::get(&controller).ok_or("ledger not created after")?; @@ -343,9 +378,10 @@ benchmarks! { reap_stash { let s in 1 .. 
MAX_SPANS; - let (stash, controller) = create_stash_controller::(0, 100)?; + let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; add_slashing_spans::(&stash, s); T::Currency::make_free_balance_be(&stash, 0.into()); + do_whitelist!(controller); }: _(RawOrigin::Signed(controller), stash.clone(), s) verify { assert!(!Bonded::::contains_key(&stash)); @@ -362,32 +398,7 @@ benchmarks! { assert!(validators.len() == v as usize); } - do_slash { - let l in 1 .. MAX_UNLOCKING_CHUNKS as u32; - let (stash, controller) = create_stash_controller::(0, 100)?; - let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); - let unlock_chunk = UnlockChunk::> { - value: 1.into(), - era: EraIndex::zero(), - }; - for _ in 0 .. l { - staking_ledger.unlocking.push(unlock_chunk.clone()) - } - Ledger::::insert(controller, staking_ledger); - let slash_amount = T::Currency::minimum_balance() * 10.into(); - let balance_before = T::Currency::free_balance(&stash); - }: { - crate::slashing::do_slash::( - &stash, - slash_amount, - &mut BalanceOf::::zero(), - &mut NegativeImbalanceOf::::zero() - ); - } verify { - let balance_after = T::Currency::free_balance(&stash); - assert!(balance_before > balance_after); - } - + #[extra] payout_all { let v in 1 .. 10; let n in 1 .. 100; @@ -419,25 +430,52 @@ benchmarks! { let total_payout = T::Currency::minimum_balance() * 1000.into(); >::insert(current_era, total_payout); - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); }: { for arg in payout_calls_arg { >::payout_stakers(RawOrigin::Signed(caller.clone()).into(), arg.0, arg.1)?; } } - // This benchmark create `v` validators intent, `n` nominators intent, each nominator nominate - // MAX_NOMINATIONS in the set of the first `w` validators. - // It builds a solution with `w` winners composed of nominated validators randomly nominated, - // `a` assignment with MAX_NOMINATIONS. + #[extra] + do_slash { + let l in 1 .. MAX_UNLOCKING_CHUNKS as u32; + let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; + let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); + let unlock_chunk = UnlockChunk::> { + value: 1.into(), + era: EraIndex::zero(), + }; + for _ in 0 .. l { + staking_ledger.unlocking.push(unlock_chunk.clone()) + } + Ledger::::insert(controller, staking_ledger); + let slash_amount = T::Currency::minimum_balance() * 10.into(); + let balance_before = T::Currency::free_balance(&stash); + }: { + crate::slashing::do_slash::( + &stash, + slash_amount, + &mut BalanceOf::::zero(), + &mut NegativeImbalanceOf::::zero() + ); + } verify { + let balance_after = T::Currency::free_balance(&stash); + assert!(balance_before > balance_after); + } + + // This benchmark create `v` validators intent, `n` nominators intent, in total creating `e` + // edges. + #[extra] submit_solution_initial { - // number of validator intent - let v in 1000 .. 2000; - // number of nominator intent - let n in 1000 .. 2000; - // number of assignments. Basically, number of active nominators. - let a in 200 .. 500; - // number of winners, also ValidatorCount + // number of validator intention. This will be equal to `ElectionSize::validators`. + let v in 200 .. 400; + // number of nominator intention. This will be equal to `ElectionSize::nominators`. + let n in 500 .. 1000; + // number of assignments. Basically, number of active nominators. This will be equal to + // `compact.len()`. + let a in 200 .. 
400; + // number of winners, also ValidatorCount. This will be equal to `winner.len()`. let w in 16 .. 100; ensure!(w as usize >= MAX_NOMINATIONS, "doesn't support lower value"); @@ -466,11 +504,19 @@ benchmarks! { size ) = offchain_election::prepare_submission::(assignments, winners, false).unwrap(); + assert_eq!( + winners.len(), compact.unique_targets().len(), + "unique targets ({}) and winners ({}) count not same. This solution is not valid.", + compact.unique_targets().len(), + winners.len(), + ); + // needed for the solution to be accepted >::put(ElectionStatus::Open(T::BlockNumber::from(1u32))); let era = >::current_era().unwrap_or(0); let caller: T::AccountId = account("caller", n, SEED); + do_whitelist!(caller); }: { let result = >::submit_election_solution( RawOrigin::Signed(caller.clone()).into(), @@ -489,13 +535,13 @@ benchmarks! { // same as submit_solution_initial but we place a very weak solution on chian first. submit_solution_better { - // number of validator intent - let v in 1000 .. 2000; - // number of nominator intent - let n in 1000 .. 2000; + // number of validator intention. + let v in 200 .. 400; + // number of nominator intention. + let n in 500 .. 1000; // number of assignments. Basically, number of active nominators. - let a in 200 .. 500; - // number of winners, also ValidatorCount + let a in 200 .. 400; + // number of winners, also ValidatorCount. let w in 16 .. 100; ensure!(w as usize >= MAX_NOMINATIONS, "doesn't support lower value"); @@ -526,11 +572,19 @@ benchmarks! { size ) = offchain_election::prepare_submission::(assignments, winners, false).unwrap(); + assert_eq!( + winners.len(), compact.unique_targets().len(), + "unique targets ({}) and winners ({}) count not same. This solution is not valid.", + compact.unique_targets().len(), + winners.len(), + ); + // needed for the solution to be accepted >::put(ElectionStatus::Open(T::BlockNumber::from(1u32))); let era = >::current_era().unwrap_or(0); let caller: T::AccountId = account("caller", n, SEED); + do_whitelist!(caller); // submit a very bad solution on-chain { @@ -568,11 +622,12 @@ benchmarks! { } // This will be early rejected based on the score. + #[extra] submit_solution_weaker { - // number of validator intent - let v in 1000 .. 2000; - // number of nominator intent - let n in 1000 .. 2000; + // number of validator intention. + let v in 200 .. 400; + // number of nominator intention. + let n in 500 .. 1000; create_validators_with_nominators_for_era::(v, n, MAX_NOMINATIONS, false, None)?; @@ -581,12 +636,19 @@ benchmarks! { // needed for the solution to be accepted >::put(ElectionStatus::Open(T::BlockNumber::from(1u32))); - let caller: T::AccountId = account("caller", n, SEED); let era = >::current_era().unwrap_or(0); + let caller: T::AccountId = account("caller", n, SEED); + do_whitelist!(caller); // submit a seq-phragmen with all the good stuff on chain. { let (winners, compact, score, size) = get_seq_phragmen_solution::(true); + assert_eq!( + winners.len(), compact.unique_targets().len(), + "unique targets ({}) and winners ({}) count not same. 
This solution is not valid.", + compact.unique_targets().len(), + winners.len(), + ); assert!( >::submit_election_solution( RawOrigin::Signed(caller.clone()).into(), @@ -650,6 +712,7 @@ mod tests { n, ::MaxNominatorRewardedPerValidator::get() as u32, false, + RewardDestination::Staked, ).unwrap(); let current_era = CurrentEra::get().unwrap(); @@ -671,6 +734,7 @@ mod tests { n, ::MaxNominatorRewardedPerValidator::get() as u32, false, + RewardDestination::Staked, ).unwrap(); // Add 20 slashing spans @@ -703,7 +767,8 @@ mod tests { let closure_to_benchmark = >::instance( &selected_benchmark, - &c + &c, + true ).unwrap(); assert_ok!(closure_to_benchmark()); @@ -730,8 +795,8 @@ mod tests { assert_ok!(test_benchmark_set_invulnerables::()); assert_ok!(test_benchmark_force_unstake::()); assert_ok!(test_benchmark_cancel_deferred_slash::()); - assert_ok!(test_benchmark_payout_stakers::()); - assert_ok!(test_benchmark_payout_stakers_alive_controller::()); + assert_ok!(test_benchmark_payout_stakers_dead_controller::()); + assert_ok!(test_benchmark_payout_stakers_alive_staked::()); assert_ok!(test_benchmark_rebond::()); assert_ok!(test_benchmark_set_history_depth::()); assert_ok!(test_benchmark_reap_stash::()); diff --git a/frame/staking/src/default_weights.rs b/frame/staking/src/default_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..fa5a05f63824e6a95862826f62a0a3bcda3a52da --- /dev/null +++ b/frame/staking/src/default_weights.rs @@ -0,0 +1,169 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Default weights of pallet-staking. +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn bond() -> Weight { + (144278000 as Weight) + .saturating_add(DbWeight::get().reads(5 as Weight)) + .saturating_add(DbWeight::get().writes(4 as Weight)) + } + fn bond_extra() -> Weight { + (110715000 as Weight) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn unbond() -> Weight { + (99840000 as Weight) + .saturating_add(DbWeight::get().reads(5 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn withdraw_unbonded_update(s: u32, ) -> Weight { + (100728000 as Weight) + .saturating_add((63000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(5 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn withdraw_unbonded_kill(s: u32, ) -> Weight { + (168879000 as Weight) + .saturating_add((6666000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(7 as Weight)) + .saturating_add(DbWeight::get().writes(8 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + fn validate() -> Weight { + (35539000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn nominate(n: u32, ) -> Weight { + (48596000 as Weight) + .saturating_add((308000 as Weight).saturating_mul(n as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn chill() -> Weight { + (35144000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn set_payee() -> Weight { + (24255000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_controller() -> Weight { + (52294000 as Weight) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn set_validator_count() -> Weight { + (5185000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn force_no_eras() -> Weight { + (5907000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn force_new_era() -> Weight { + (5917000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn force_new_era_always() -> Weight { + (5952000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_invulnerables(v: u32, ) -> Weight { + (6324000 as Weight) + .saturating_add((9000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn force_unstake(s: u32, ) -> Weight { + (119691000 as Weight) + .saturating_add((6681000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(8 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + fn cancel_deferred_slash(s: u32, ) -> Weight { + (5820201000 as Weight) + .saturating_add((34672000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn payout_stakers_dead_controller(n: u32, ) -> Weight { + (0 as Weight) + 
.saturating_add((92486000 as Weight).saturating_mul(n as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) + } + fn payout_stakers_alive_staked(n: u32, ) -> Weight { + (0 as Weight) + .saturating_add((117324000 as Weight).saturating_mul(n as Weight)) + .saturating_add(DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) + } + fn rebond(l: u32, ) -> Weight { + (71316000 as Weight) + .saturating_add((142000 as Weight).saturating_mul(l as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn set_history_depth(e: u32, ) -> Weight { + (0 as Weight) + .saturating_add((51901000 as Weight).saturating_mul(e as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(4 as Weight)) + .saturating_add(DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) + } + fn reap_stash(s: u32, ) -> Weight { + (147166000 as Weight) + .saturating_add((6661000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(8 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + fn new_era(v: u32, n: u32, ) -> Weight { + (0 as Weight) + .saturating_add((1440459000 as Weight).saturating_mul(v as Weight)) + .saturating_add((182580000 as Weight).saturating_mul(n as Weight)) + .saturating_add(DbWeight::get().reads(10 as Weight)) + .saturating_add(DbWeight::get().reads((4 as Weight).saturating_mul(v as Weight))) + .saturating_add(DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(DbWeight::get().writes(8 as Weight)) + .saturating_add(DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + } + fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight { + (0 as Weight) + .saturating_add((964000 as Weight).saturating_mul(v as Weight)) + .saturating_add((432000 as Weight).saturating_mul(n as Weight)) + .saturating_add((204294000 as Weight).saturating_mul(a as Weight)) + .saturating_add((9546000 as Weight).saturating_mul(w as Weight)) + .saturating_add(DbWeight::get().reads(6 as Weight)) + .saturating_add(DbWeight::get().reads((4 as Weight).saturating_mul(a as Weight))) + .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(w as Weight))) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 8f5b8561eb420b1a0e0877b526a5a4bf4a8ae485..7061832b0460c06d1a8c6e41ec5cd5313c898216 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -279,6 +279,7 @@ pub mod benchmarking; pub mod slashing; pub mod offchain_election; pub mod inflation; +pub mod default_weights; use sp_std::{ result, @@ -356,6 +357,8 @@ pub type ValidatorIndex = u16; // Ensure the size of both ValidatorIndex and NominatorIndex. They both need to be well below usize. static_assertions::const_assert!(size_of::() <= size_of::()); static_assertions::const_assert!(size_of::() <= size_of::()); +static_assertions::const_assert!(size_of::() <= size_of::()); +static_assertions::const_assert!(size_of::() <= size_of::()); /// Maximum number of stakers that can be stored in a snapshot. 
pub(crate) const MAX_VALIDATORS: usize = ValidatorIndex::max_value() as usize; @@ -425,16 +428,18 @@ pub enum StakerStatus { /// A destination account for payment. #[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, RuntimeDebug)] -pub enum RewardDestination { +pub enum RewardDestination { /// Pay into the stash account, increasing the amount at stake accordingly. Staked, /// Pay into the stash account, not increasing the amount at stake. Stash, /// Pay into the controller account. Controller, + /// Pay into a specified account. + Account(AccountId), } -impl Default for RewardDestination { +impl Default for RewardDestination { fn default() -> Self { RewardDestination::Staked } @@ -764,132 +769,31 @@ impl SessionInterface<::AccountId> for T whe } } -pub mod weight { - use super::*; - - /// All weight notes are pertaining to the case of a better solution, in which we execute - /// the longest code path. - /// Weight: 0 + (0.63 μs * v) + (0.36 μs * n) + (96.53 μs * a ) + (8 μs * w ) with: - /// * v validators in snapshot validators, - /// * n nominators in snapshot nominators, - /// * a assignment in the submitted solution - /// * w winners in the submitted solution - /// - /// State reads: - /// - Initial checks: - /// - ElectionState, CurrentEra, QueuedScore - /// - SnapshotValidators.len() + SnapShotNominators.len() - /// - ValidatorCount - /// - SnapshotValidators - /// - SnapshotNominators - /// - Iterate over nominators: - /// - compact.len() * Nominators(who) - /// - (non_self_vote_edges) * SlashingSpans - /// - For `assignment_ratio_to_staked`: Basically read the staked value of each stash. - /// - (winners.len() + compact.len()) * (Ledger + Bonded) - /// - TotalIssuance (read a gzillion times potentially, but well it is cached.) - /// - State writes: - /// - QueuedElected, QueuedScore - pub fn weight_for_submit_solution( - winners: &Vec, - compact: &CompactAssignments, - size: &ElectionSize, - ) -> Weight { - (630 * WEIGHT_PER_NANOS).saturating_mul(size.validators as Weight) - .saturating_add((360 * WEIGHT_PER_NANOS).saturating_mul(size.nominators as Weight)) - .saturating_add((96 * WEIGHT_PER_MICROS).saturating_mul(compact.len() as Weight)) - .saturating_add((8 * WEIGHT_PER_MICROS).saturating_mul(winners.len() as Weight)) - // Initial checks - .saturating_add(T::DbWeight::get().reads(8)) - // Nominators - .saturating_add(T::DbWeight::get().reads(compact.len() as Weight)) - // SlashingSpans (upper bound for invalid solution) - .saturating_add(T::DbWeight::get().reads(compact.edge_count() as Weight)) - // `assignment_ratio_to_staked` - .saturating_add(T::DbWeight::get().reads(2 * ((winners.len() + compact.len()) as Weight))) - .saturating_add(T::DbWeight::get().reads(1)) - // write queued score and elected - .saturating_add(T::DbWeight::get().writes(2)) - } - - /// Weight of `submit_solution` in case of a correct submission. - /// - /// refund: we charged compact.len() * read(1) for SlashingSpans. A valid solution only reads - /// winners.len(). - pub fn weight_for_correct_submit_solution( - winners: &Vec, - compact: &CompactAssignments, - size: &ElectionSize, - ) -> Weight { - // NOTE: for consistency, we re-compute the original weight to maintain their relation and - // prevent any foot-guns. 
- let original_weight = weight_for_submit_solution::(winners, compact, size); - original_weight - .saturating_sub(T::DbWeight::get().reads(compact.edge_count() as Weight)) - .saturating_add(T::DbWeight::get().reads(winners.len() as Weight)) - } -} - pub trait WeightInfo { - fn bond(u: u32, ) -> Weight; - fn bond_extra(u: u32, ) -> Weight; - fn unbond(u: u32, ) -> Weight; + fn bond() -> Weight; + fn bond_extra() -> Weight; + fn unbond() -> Weight; fn withdraw_unbonded_update(s: u32, ) -> Weight; fn withdraw_unbonded_kill(s: u32, ) -> Weight; - fn validate(u: u32, ) -> Weight; + fn validate() -> Weight; fn nominate(n: u32, ) -> Weight; - fn chill(u: u32, ) -> Weight; - fn set_payee(u: u32, ) -> Weight; - fn set_controller(u: u32, ) -> Weight; - fn set_validator_count(c: u32, ) -> Weight; - fn force_no_eras(i: u32, ) -> Weight; - fn force_new_era(i: u32, ) -> Weight; - fn force_new_era_always(i: u32, ) -> Weight; + fn chill() -> Weight; + fn set_payee() -> Weight; + fn set_controller() -> Weight; + fn set_validator_count() -> Weight; + fn force_no_eras() -> Weight; + fn force_new_era() -> Weight; + fn force_new_era_always() -> Weight; fn set_invulnerables(v: u32, ) -> Weight; fn force_unstake(s: u32, ) -> Weight; fn cancel_deferred_slash(s: u32, ) -> Weight; - fn payout_stakers(n: u32, ) -> Weight; - fn payout_stakers_alive_controller(n: u32, ) -> Weight; + fn payout_stakers_alive_staked(n: u32, ) -> Weight; + fn payout_stakers_dead_controller(n: u32, ) -> Weight; fn rebond(l: u32, ) -> Weight; fn set_history_depth(e: u32, ) -> Weight; fn reap_stash(s: u32, ) -> Weight; fn new_era(v: u32, n: u32, ) -> Weight; - fn do_slash(l: u32, ) -> Weight; - fn payout_all(v: u32, n: u32, ) -> Weight; - fn submit_solution_initial(v: u32, n: u32, a: u32, w: u32, ) -> Weight; fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight; - fn submit_solution_weaker(v: u32, n: u32, ) -> Weight; -} - -impl WeightInfo for () { - fn bond(_u: u32, ) -> Weight { 1_000_000_000 } - fn bond_extra(_u: u32, ) -> Weight { 1_000_000_000 } - fn unbond(_u: u32, ) -> Weight { 1_000_000_000 } - fn withdraw_unbonded_update(_s: u32, ) -> Weight { 1_000_000_000 } - fn withdraw_unbonded_kill(_s: u32, ) -> Weight { 1_000_000_000 } - fn validate(_u: u32, ) -> Weight { 1_000_000_000 } - fn nominate(_n: u32, ) -> Weight { 1_000_000_000 } - fn chill(_u: u32, ) -> Weight { 1_000_000_000 } - fn set_payee(_u: u32, ) -> Weight { 1_000_000_000 } - fn set_controller(_u: u32, ) -> Weight { 1_000_000_000 } - fn set_validator_count(_c: u32, ) -> Weight { 1_000_000_000 } - fn force_no_eras(_i: u32, ) -> Weight { 1_000_000_000 } - fn force_new_era(_i: u32, ) -> Weight { 1_000_000_000 } - fn force_new_era_always(_i: u32, ) -> Weight { 1_000_000_000 } - fn set_invulnerables(_v: u32, ) -> Weight { 1_000_000_000 } - fn force_unstake(_s: u32, ) -> Weight { 1_000_000_000 } - fn cancel_deferred_slash(_s: u32, ) -> Weight { 1_000_000_000 } - fn payout_stakers(_n: u32, ) -> Weight { 1_000_000_000 } - fn payout_stakers_alive_controller(_n: u32, ) -> Weight { 1_000_000_000 } - fn rebond(_l: u32, ) -> Weight { 1_000_000_000 } - fn set_history_depth(_e: u32, ) -> Weight { 1_000_000_000 } - fn reap_stash(_s: u32, ) -> Weight { 1_000_000_000 } - fn new_era(_v: u32, _n: u32, ) -> Weight { 1_000_000_000 } - fn do_slash(_l: u32, ) -> Weight { 1_000_000_000 } - fn payout_all(_v: u32, _n: u32, ) -> Weight { 1_000_000_000 } - fn submit_solution_initial(_v: u32, _n: u32, _a: u32, _w: u32, ) -> Weight { 1_000_000_000 } - fn submit_solution_better(_v: u32, _n: 
u32, _a: u32, _w: u32, ) -> Weight { 1_000_000_000 } - fn submit_solution_weaker(_v: u32, _n: u32, ) -> Weight { 1_000_000_000 } } pub trait Trait: frame_system::Trait + SendTransactionTypes> { @@ -1049,7 +953,7 @@ decl_storage! { => Option>>; /// Where the reward payment should be made. Keyed by stash. - pub Payee get(fn payee): map hasher(twox_64_concat) T::AccountId => RewardDestination; + pub Payee get(fn payee): map hasher(twox_64_concat) T::AccountId => RewardDestination; /// The map from (wannabe) validator stash key to the preferences of that validator. pub Validators get(fn validators): @@ -1160,7 +1064,7 @@ decl_storage! { => Option>; /// Slashing spans for stash accounts. - SlashingSpans: map hasher(twox_64_concat) T::AccountId => Option; + SlashingSpans get(fn slashing_spans): map hasher(twox_64_concat) T::AccountId => Option; /// Records information about the maximum slash of a stash within a slashing span, /// as well as how much reward has been paid out. @@ -1239,29 +1143,29 @@ decl_event!( pub enum Event where Balance = BalanceOf, ::AccountId { /// The era payout has been set; the first balance is the validator-payout; the second is /// the remainder from the maximum amount of reward. - /// [era_index, validator_payout, remainder] + /// \[era_index, validator_payout, remainder\] EraPayout(EraIndex, Balance, Balance), - /// The staker has been rewarded by this amount. [stash, amount] + /// The staker has been rewarded by this amount. \[stash, amount\] Reward(AccountId, Balance), /// One validator (and its nominators) has been slashed by the given amount. - /// [validator, amount] + /// \[validator, amount\] Slash(AccountId, Balance), /// An old slashing report from a prior era was discarded because it could - /// not be processed. [session_index] + /// not be processed. \[session_index\] OldSlashingReportDiscarded(SessionIndex), - /// A new set of stakers was elected with the given [compute]. + /// A new set of stakers was elected with the given \[compute\]. StakingElection(ElectionCompute), - /// A new solution for the upcoming election has been stored. [compute] + /// A new solution for the upcoming election has been stored. \[compute\] SolutionStored(ElectionCompute), - /// An account has bonded this amount. [stash, amount] + /// An account has bonded this amount. \[stash, amount\] /// /// NOTE: This event is only emitted when funds are bonded via a dispatchable. Notably, /// it will not be emitted for staking rewards when they are added to stake. Bonded(AccountId, Balance), - /// An account has unbonded this amount. [stash, amount] + /// An account has unbonded this amount. \[stash, amount\] Unbonded(AccountId, Balance), /// An account has called `withdraw_unbonded` and removed unbonding chunks worth `Balance` - /// from the unlocking queue. [stash, amount] + /// from the unlocking queue. \[stash, amount\] Withdrawn(AccountId, Balance), } ); @@ -1487,16 +1391,16 @@ decl_module! { /// NOTE: Two of the storage writes (`Self::bonded`, `Self::payee`) are _never_ cleaned /// unless the `origin` falls below _existential deposit_ and gets removed as dust. 
/// ------------------ - /// Base Weight: 67.87 µs + /// Weight: O(1) /// DB Weight: /// - Read: Bonded, Ledger, [Origin Account], Current Era, History Depth, Locks /// - Write: Bonded, Payee, [Origin Account], Locks, Ledger /// # - #[weight = 67 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(5, 4)] + #[weight = T::WeightInfo::bond()] pub fn bond(origin, controller: ::Source, #[compact] value: BalanceOf, - payee: RewardDestination, + payee: RewardDestination, ) { let stash = ensure_signed(origin)?; @@ -1556,12 +1460,11 @@ decl_module! { /// - O(1). /// - One DB entry. /// ------------ - /// Base Weight: 54.88 µs /// DB Weight: /// - Read: Era Election Status, Bonded, Ledger, [Origin Account], Locks /// - Write: [Origin Account], Locks, Ledger /// # - #[weight = 55 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(4, 2)] + #[weight = T::WeightInfo::bond_extra()] fn bond_extra(origin, #[compact] max_additional: BalanceOf) { ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let stash = ensure_signed(origin)?; @@ -1607,12 +1510,12 @@ decl_module! { /// `withdraw_unbonded`. /// - One DB entry. /// ---------- - /// Base Weight: 50.34 µs + /// Weight: O(1) /// DB Weight: - /// - Read: Era Election Status, Ledger, Current Era, Locks, [Origin Account] - /// - Write: [Origin Account], Locks, Ledger + /// - Read: EraElectionStatus, Ledger, CurrentEra, Locks, BalanceOf Stash, + /// - Write: Locks, Ledger, BalanceOf Stash, /// - #[weight = 50 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(4, 2)] + #[weight = T::WeightInfo::unbond()] fn unbond(origin, #[compact] value: BalanceOf) { ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; @@ -1661,25 +1564,18 @@ decl_module! { /// - Writes are limited to the `origin` account key. /// --------------- /// Complexity O(S) where S is the number of slashing spans to remove - /// Base Weight: - /// Update: 50.52 + .028 * S µs + /// Update: /// - Reads: EraElectionStatus, Ledger, Current Era, Locks, [Origin Account] /// - Writes: [Origin Account], Locks, Ledger - /// Kill: 79.41 + 2.366 * S µs - /// - Reads: EraElectionStatus, Ledger, Current Era, Bonded, Slashing Spans, [Origin Account], Locks - /// - Writes: Bonded, Slashing Spans (if S > 0), Ledger, Payee, Validators, Nominators, [Origin Account], Locks + /// Kill: + /// - Reads: EraElectionStatus, Ledger, Current Era, Bonded, Slashing Spans, [Origin + /// Account], Locks, BalanceOf stash + /// - Writes: Bonded, Slashing Spans (if S > 0), Ledger, Payee, Validators, Nominators, + /// [Origin Account], Locks, BalanceOf stash. /// - Writes Each: SpanSlash * S /// NOTE: Weight annotation is the kill scenario, we refund otherwise. /// # - #[weight = T::DbWeight::get().reads_writes(6, 6) - .saturating_add(80 * WEIGHT_PER_MICROS) - .saturating_add( - (2 * WEIGHT_PER_MICROS).saturating_mul(Weight::from(*num_slashing_spans)) - ) - .saturating_add(T::DbWeight::get().writes(Weight::from(*num_slashing_spans))) - // if slashing spans is non-zero, add 1 more write - .saturating_add(T::DbWeight::get().writes(Weight::from(*num_slashing_spans).min(1))) - ] + #[weight = T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans)] fn withdraw_unbonded(origin, num_slashing_spans: u32) -> DispatchResultWithPostInfo { ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; @@ -1701,8 +1597,9 @@ decl_module! { } else { // This was the consequence of a partial unbond. 
just update the ledger and move on. Self::update_ledger(&controller, &ledger); - // This is only an update, so we use less overall weight - Some(50 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(4, 2)) + + // This is only an update, so we use less overall weight. + Some(T::WeightInfo::withdraw_unbonded_update(num_slashing_spans)) }; // `old_total` should never be less than the new total because @@ -1728,12 +1625,12 @@ decl_module! { /// - Contains a limited number of reads. /// - Writes are limited to the `origin` account key. /// ----------- - /// Base Weight: 17.13 µs + /// Weight: O(1) /// DB Weight: /// - Read: Era Election Status, Ledger /// - Write: Nominators, Validators /// # - #[weight = 17 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(2, 2)] + #[weight = T::WeightInfo::validate()] pub fn validate(origin, prefs: ValidatorPrefs) { ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; @@ -1756,16 +1653,13 @@ decl_module! { /// which is capped at CompactAssignments::LIMIT (MAX_NOMINATIONS). /// - Both the reads and writes follow a similar pattern. /// --------- - /// Base Weight: 22.34 + .36 * N µs + /// Weight: O(N) /// where N is the number of targets /// DB Weight: /// - Reads: Era Election Status, Ledger, Current Era /// - Writes: Validators, Nominators /// # - #[weight = T::DbWeight::get().reads_writes(3, 2) - .saturating_add(22 * WEIGHT_PER_MICROS) - .saturating_add((360 * WEIGHT_PER_NANOS).saturating_mul(targets.len() as Weight)) - ] + #[weight = T::WeightInfo::nominate(targets.len() as u32)] pub fn nominate(origin, targets: Vec<::Source>) { ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; @@ -1800,12 +1694,12 @@ decl_module! { /// - Contains one read. /// - Writes are limited to the `origin` account key. /// -------- - /// Base Weight: 16.53 µs + /// Weight: O(1) /// DB Weight: /// - Read: EraElectionStatus, Ledger /// - Write: Validators, Nominators /// # - #[weight = 16 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(2, 2)] + #[weight = T::WeightInfo::chill()] fn chill(origin) { ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; @@ -1824,13 +1718,13 @@ decl_module! { /// - Contains a limited number of reads. /// - Writes are limited to the `origin` account key. /// --------- - /// - Base Weight: 11.33 µs + /// - Weight: O(1) /// - DB Weight: /// - Read: Ledger /// - Write: Payee /// # - #[weight = 11 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(1, 1)] - fn set_payee(origin, payee: RewardDestination) { + #[weight = T::WeightInfo::set_payee()] + fn set_payee(origin, payee: RewardDestination) { let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; @@ -1848,12 +1742,12 @@ decl_module! { /// - Contains a limited number of reads. /// - Writes are limited to the `origin` account key. 
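`withdraw_unbonded` above is annotated with the worst-case `withdraw_unbonded_kill` weight and, on the cheaper partial-unbond path, returns `Some(T::WeightInfo::withdraw_unbonded_update(..))` so the difference is refunded post-dispatch. Here is a self-contained model of that charge-then-refund pattern; the constants are illustrative, and the real code carries the refund back through `DispatchResultWithPostInfo` rather than a bare `Option`.

```rust
// Sketch of "charge the worst case up front, refund the cheaper path".
type Weight = u64;

const KILL_WEIGHT: Weight = 80_000_000;   // annotated (worst-case) weight
const UPDATE_WEIGHT: Weight = 50_000_000; // cheaper "update only" path

/// `None` means "keep the pre-dispatch (annotated) weight";
/// `Some(w)` refunds the caller down to `w`.
fn withdraw_unbonded(ledger_emptied: bool) -> Option<Weight> {
    if ledger_emptied {
        // Worst case actually happened: the stash was reaped.
        None
    } else {
        // Only a ledger update: report the smaller actual weight.
        Some(UPDATE_WEIGHT)
    }
}

fn charged(post: Option<Weight>) -> Weight {
    // The refunded weight can never exceed what was charged up front.
    post.unwrap_or(KILL_WEIGHT).min(KILL_WEIGHT)
}

fn main() {
    assert_eq!(charged(withdraw_unbonded(true)), KILL_WEIGHT);
    assert_eq!(charged(withdraw_unbonded(false)), UPDATE_WEIGHT);
    println!("refund model ok");
}
```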
/// ---------- - /// Base Weight: 25.22 µs + /// Weight: O(1) /// DB Weight: /// - Read: Bonded, Ledger New Controller, Ledger Old Controller /// - Write: Bonded, Ledger New Controller, Ledger Old Controller /// # - #[weight = 25 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(3, 3)] + #[weight = T::WeightInfo::set_controller()] fn set_controller(origin, controller: ::Source) { let stash = ensure_signed(origin)?; let old_controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; @@ -1874,10 +1768,10 @@ decl_module! { /// The dispatch origin must be Root. /// /// # - /// Base Weight: 1.717 µs + /// Weight: O(1) /// Write: Validator Count /// # - #[weight = 2 * WEIGHT_PER_MICROS + T::DbWeight::get().writes(1)] + #[weight = T::WeightInfo::set_validator_count()] fn set_validator_count(origin, #[compact] new: u32) { ensure_root(origin)?; ValidatorCount::put(new); @@ -1888,10 +1782,9 @@ decl_module! { /// The dispatch origin must be Root. /// /// # - /// Base Weight: 1.717 µs - /// Read/Write: Validator Count + /// Same as [`set_validator_count`]. /// # - #[weight = 2 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(1, 1)] + #[weight = T::WeightInfo::set_validator_count()] fn increase_validator_count(origin, #[compact] additional: u32) { ensure_root(origin)?; ValidatorCount::mutate(|n| *n += additional); @@ -1902,10 +1795,9 @@ decl_module! { /// The dispatch origin must be Root. /// /// # - /// Base Weight: 1.717 µs - /// Read/Write: Validator Count + /// Same as [`set_validator_count`]. /// # - #[weight = 2 * WEIGHT_PER_MICROS + T::DbWeight::get().reads_writes(1, 1)] + #[weight = T::WeightInfo::set_validator_count()] fn scale_validator_count(origin, factor: Percent) { ensure_root(origin)?; ValidatorCount::mutate(|n| *n += factor * *n); @@ -1917,10 +1809,10 @@ decl_module! { /// /// # /// - No arguments. - /// - Base Weight: 1.857 µs + /// - Weight: O(1) /// - Write: ForceEra /// # - #[weight = 2 * WEIGHT_PER_MICROS + T::DbWeight::get().writes(1)] + #[weight = T::WeightInfo::force_no_eras()] fn force_no_eras(origin) { ensure_root(origin)?; ForceEra::put(Forcing::ForceNone); @@ -1933,10 +1825,10 @@ decl_module! { /// /// # /// - No arguments. - /// - Base Weight: 1.959 µs + /// - Weight: O(1) /// - Write ForceEra /// # - #[weight = 2 * WEIGHT_PER_MICROS + T::DbWeight::get().writes(1)] + #[weight = T::WeightInfo::force_new_era()] fn force_new_era(origin) { ensure_root(origin)?; ForceEra::put(Forcing::ForceNew); @@ -1948,16 +1840,12 @@ decl_module! { /// /// # /// - O(V) - /// - Base Weight: 2.208 + .006 * V µs /// - Write: Invulnerables /// # - #[weight = T::DbWeight::get().writes(1) - .saturating_add(2 * WEIGHT_PER_MICROS) - .saturating_add((6 * WEIGHT_PER_NANOS).saturating_mul(validators.len() as Weight)) - ] - fn set_invulnerables(origin, validators: Vec) { + #[weight = T::WeightInfo::set_invulnerables(invulnerables.len() as u32)] + fn set_invulnerables(origin, invulnerables: Vec) { ensure_root(origin)?; - >::put(validators); + >::put(invulnerables); } /// Force a current staker to become completely unstaked, immediately. @@ -1966,20 +1854,11 @@ decl_module! 
{ /// /// # /// O(S) where S is the number of slashing spans to be removed - /// Base Weight: 53.07 + 2.365 * S µs /// Reads: Bonded, Slashing Spans, Account, Locks /// Writes: Bonded, Slashing Spans (if S > 0), Ledger, Payee, Validators, Nominators, Account, Locks /// Writes Each: SpanSlash * S /// # - #[weight = T::DbWeight::get().reads_writes(4, 7) - .saturating_add(53 * WEIGHT_PER_MICROS) - .saturating_add( - WEIGHT_PER_MICROS.saturating_mul(2).saturating_mul(Weight::from(*num_slashing_spans)) - ) - .saturating_add(T::DbWeight::get().writes(Weight::from(*num_slashing_spans))) - // if slashing spans is non-zero, add 1 more write - .saturating_add(T::DbWeight::get().writes(Weight::from(*num_slashing_spans > 0))) - ] + #[weight = T::WeightInfo::force_unstake(*num_slashing_spans)] fn force_unstake(origin, stash: T::AccountId, num_slashing_spans: u32) { ensure_root(origin)?; @@ -1995,10 +1874,10 @@ decl_module! { /// The dispatch origin must be Root. /// /// # - /// - Base Weight: 2.05 µs + /// - Weight: O(1) /// - Write: ForceEra /// # - #[weight = 2 * WEIGHT_PER_MICROS + T::DbWeight::get().writes(1)] + #[weight = T::WeightInfo::force_new_era_always()] fn force_new_era_always(origin) { ensure_root(origin)?; ForceEra::put(Forcing::ForceAlways); @@ -2014,14 +1893,10 @@ decl_module! { /// Complexity: O(U + S) /// with U unapplied slashes weighted with U=1000 /// and S is the number of slash indices to be canceled. - /// - Base: 5870 + 34.61 * S µs /// - Read: Unapplied Slashes /// - Write: Unapplied Slashes /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) - .saturating_add(5_870 * WEIGHT_PER_MICROS) - .saturating_add((35 * WEIGHT_PER_MICROS).saturating_mul(slash_indices.len() as Weight)) - ] + #[weight = T::WeightInfo::cancel_deferred_slash(slash_indices.len() as u32)] fn cancel_deferred_slash(origin, era: EraIndex, slash_indices: Vec) { T::SlashCancelOrigin::ensure_origin(origin)?; @@ -2056,22 +1931,19 @@ decl_module! { /// - Contains a limited number of reads and writes. /// ----------- /// N is the Number of payouts for the validator (including the validator) - /// Base Weight: - /// - Reward Destination Staked: 110 + 54.2 * N µs (Median Slopes) - /// - Reward Destination Controller (Creating): 120 + 41.95 * N µs (Median Slopes) + /// Weight: + /// - Reward Destination Staked: O(N) + /// - Reward Destination Controller (Creating): O(N) /// DB Weight: /// - Read: EraElectionStatus, CurrentEra, HistoryDepth, ErasValidatorReward, /// ErasStakersClipped, ErasRewardPoints, ErasValidatorPrefs (8 items) /// - Read Each: Bonded, Ledger, Payee, Locks, System Account (5 items) /// - Write Each: System Account, Locks, Ledger (3 items) + /// + /// NOTE: weights are assuming that payouts are made to alive stash account (Staked). + /// Paying even a dead controller is cheaper weight-wise. We don't do any refunds here. /// # - #[weight = - 120 * WEIGHT_PER_MICROS - + 54 * WEIGHT_PER_MICROS * Weight::from(T::MaxNominatorRewardedPerValidator::get()) - + T::DbWeight::get().reads(7) - + T::DbWeight::get().reads(5) * Weight::from(T::MaxNominatorRewardedPerValidator::get() + 1) - + T::DbWeight::get().writes(3) * Weight::from(T::MaxNominatorRewardedPerValidator::get() + 1) - ] + #[weight = T::WeightInfo::payout_stakers_alive_staked(T::MaxNominatorRewardedPerValidator::get())] fn payout_stakers(origin, validator_stash: T::AccountId, era: EraIndex) -> DispatchResult { ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); ensure_signed(origin)?; @@ -2088,16 +1960,11 @@ decl_module! 
{ /// - Bounded by `MAX_UNLOCKING_CHUNKS`. /// - Storage changes: Can't increase storage, only decrease it. /// --------------- - /// - Base Weight: 34.51 µs * .048 L µs /// - DB Weight: /// - Reads: EraElectionStatus, Ledger, Locks, [Origin Account] /// - Writes: [Origin Account], Locks, Ledger /// # - #[weight = - 35 * WEIGHT_PER_MICROS - + 50 * WEIGHT_PER_NANOS * (MAX_UNLOCKING_CHUNKS as Weight) - + T::DbWeight::get().reads_writes(3, 2) - ] + #[weight = T::WeightInfo::rebond(MAX_UNLOCKING_CHUNKS as u32)] fn rebond(origin, #[compact] value: BalanceOf) -> DispatchResultWithPostInfo { ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; @@ -2127,19 +1994,14 @@ decl_module! { /// /// # /// - E: Number of history depths removed, i.e. 10 -> 7 = 3 - /// - Base Weight: 29.13 * E µs + /// - Weight: O(E) /// - DB Weight: /// - Reads: Current Era, History Depth /// - Writes: History Depth /// - Clear Prefix Each: Era Stakers, EraStakersClipped, ErasValidatorPrefs /// - Writes Each: ErasValidatorReward, ErasRewardPoints, ErasTotalStake, ErasStartSessionIndex /// # - #[weight = { - let items = Weight::from(*_era_items_deleted); - T::DbWeight::get().reads_writes(2, 1) - .saturating_add(T::DbWeight::get().reads_writes(items, items)) - - }] + #[weight = T::WeightInfo::set_history_depth(*_era_items_deleted)] fn set_history_depth(origin, #[compact] new_history_depth: EraIndex, #[compact] _era_items_deleted: u32, @@ -2167,21 +2029,12 @@ decl_module! { /// /// # /// Complexity: O(S) where S is the number of slashing spans on the account. - /// Base Weight: 75.94 + 2.396 * S µs /// DB Weight: /// - Reads: Stash Account, Bonded, Slashing Spans, Locks /// - Writes: Bonded, Slashing Spans (if S > 0), Ledger, Payee, Validators, Nominators, Stash Account, Locks /// - Writes Each: SpanSlash * S /// # - #[weight = T::DbWeight::get().reads_writes(4, 7) - .saturating_add(76 * WEIGHT_PER_MICROS) - .saturating_add( - WEIGHT_PER_MICROS.saturating_mul(2).saturating_mul(Weight::from(*num_slashing_spans)) - ) - .saturating_add(T::DbWeight::get().writes(Weight::from(*num_slashing_spans))) - // if slashing spans is non-zero, add 1 more write - .saturating_add(T::DbWeight::get().writes(Weight::from(*num_slashing_spans).min(1))) - ] + #[weight = T::WeightInfo::reap_stash(*num_slashing_spans)] fn reap_stash(_origin, stash: T::AccountId, num_slashing_spans: u32) { ensure!(T::Currency::total_balance(&stash).is_zero(), Error::::FundedTarget); Self::kill_stash(&stash, num_slashing_spans)?; @@ -2233,9 +2086,16 @@ decl_module! { /// minimized (to ensure less variance) /// /// # - /// See `crate::weight` module. + /// The transaction is assumed to be the longest path, a better solution. + /// - Initial solution is almost the same. + /// - Worse solution is retraced in pre-dispatch-checks which sets its own weight. /// # - #[weight = weight::weight_for_submit_solution::(winners, compact, size)] + #[weight = T::WeightInfo::submit_solution_better( + size.validators.into(), + size.nominators.into(), + compact.len() as u32, + winners.len() as u32, + )] pub fn submit_election_solution( origin, winners: Vec, @@ -2264,7 +2124,12 @@ decl_module! { /// # /// See `crate::weight` module. 
/// # - #[weight = weight::weight_for_submit_solution::(winners, compact, size)] + #[weight = T::WeightInfo::submit_solution_better( + size.validators.into(), + size.nominators.into(), + compact.len() as u32, + winners.len() as u32, + )] pub fn submit_election_solution_unsigned( origin, winners: Vec, @@ -2489,6 +2354,9 @@ impl Module { Self::update_ledger(&controller, &l); r }), + RewardDestination::Account(dest_account) => { + Some(T::Currency::deposit_creating(&dest_account, amount)) + } } } @@ -2575,13 +2443,16 @@ impl Module { election_size: ElectionSize, ) -> DispatchResultWithPostInfo { // Do the basic checks. era, claimed score and window open. - Self::pre_dispatch_checks(claimed_score, era)?; - // the weight that we will refund in case of a correct submission. We compute this now - // because the data needed for it will be consumed further down. - let adjusted_weight = weight::weight_for_correct_submit_solution::( - &winners, - &compact_assignments, - &election_size, + let _ = Self::pre_dispatch_checks(claimed_score, era)?; + + // before we read any further state, we check that the unique targets in compact is same as + // compact. is a all in-memory check and easy to do. Moreover, it ensures that the solution + // is not full of bogus edges that can cause lots of reads to SlashingSpans. Thus, we can + // assume that the storage access of this function is always O(|winners|), not + // O(|compact.edge_count()|). + ensure!( + compact_assignments.unique_targets().len() == winners.len(), + Error::::OffchainElectionBogusWinnerCount, ); // Check that the number of presented winners is sane. Most often we have more candidates @@ -2739,7 +2610,7 @@ impl Module { // emit event. Self::deposit_event(RawEvent::SolutionStored(compute)); - Ok(Some(adjusted_weight).into()) + Ok(None.into()) } /// Start a session potentially starting an era. @@ -2857,7 +2728,6 @@ impl Module { maybe_new_validators } - /// Remove all the storage items associated with the election. fn close_election_window() { // Close window. 
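The new pre-check above compares the number of unique targets in the submitted compact assignments with the claimed winner count before any further state is read, which bounds later `SlashingSpans` lookups by `|winners|` rather than by the solution's edge count. The following is a dependency-free illustration of that check; `Assignment`, the account type, and the error string are simplified stand-ins for `StakedAssignment`, `CompactAssignments::unique_targets`, and the pallet error.

```rust
use std::collections::BTreeSet;

type AccountId = u64;

#[allow(dead_code)] // `who` mirrors StakedAssignment but is not read in this sketch
struct Assignment {
    who: AccountId,
    distribution: Vec<(AccountId, u32)>, // (target, weight)
}

fn unique_targets(assignments: &[Assignment]) -> BTreeSet<AccountId> {
    assignments
        .iter()
        .flat_map(|a| a.distribution.iter().map(|(t, _)| *t))
        .collect()
}

fn check_winner_count(
    assignments: &[Assignment],
    winners: &[AccountId],
) -> Result<(), &'static str> {
    // Cheap, purely in-memory: reject before touching storage.
    if unique_targets(assignments).len() == winners.len() {
        Ok(())
    } else {
        Err("OffchainElectionBogusWinnerCount")
    }
}

fn main() {
    let assignments = vec![
        Assignment { who: 5, distribution: vec![(11, 50), (21, 50)] },
        Assignment { who: 7, distribution: vec![(21, 100)] },
    ];
    // Two unique targets (11, 21) against a claimed single winner: rejected.
    assert!(check_winner_count(&assignments, &[11]).is_err());
    assert!(check_winner_count(&assignments, &[11, 21]).is_ok());
    println!("winner-count check ok");
}
```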
diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index dcdacfbaacb04967c744ccb40aeee294e8c48e8c..ce1aa9339d4ba8c50f97ff690e8d0e4491c108a4 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -220,13 +220,14 @@ impl frame_system::Trait for Test { type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = Balance; type Event = MetaEvent; type DustRemoval = (); @@ -451,7 +452,7 @@ impl ExtBuilder { MAX_ITERATIONS.with(|v| *v.borrow_mut() = self.max_offchain_iterations); } pub fn build(self) -> sp_io::TestExternalities { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); self.set_associated_constants(); let mut storage = frame_system::GenesisConfig::default() .build_storage::() diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index 02acd135e63e4ea89b4e5f6645663d22454ae615..6354014232d50b357d928d9c475c33af20177dc2 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -20,7 +20,7 @@ use crate::*; use crate::Module as Staking; -use frame_benchmarking::{account}; +use frame_benchmarking::account; use frame_system::RawOrigin; use sp_io::hashing::blake2_256; use rand_chacha::{rand_core::{RngCore, SeedableRng}, ChaChaRng}; @@ -29,7 +29,11 @@ use sp_npos_elections::*; const SEED: u32 = 0; /// Grab a funded user. -pub fn create_funded_user(string: &'static str, n: u32, balance_factor: u32) -> T::AccountId { +pub fn create_funded_user( + string: &'static str, + n: u32, + balance_factor: u32, +) -> T::AccountId { let user = account(string, n, SEED); let balance = T::Currency::minimum_balance() * balance_factor.into(); T::Currency::make_free_balance_be(&user, balance); @@ -39,30 +43,36 @@ pub fn create_funded_user(string: &'static str, n: u32, balance_factor } /// Create a stash and controller pair. -pub fn create_stash_controller(n: u32, balance_factor: u32) +pub fn create_stash_controller( + n: u32, + balance_factor: u32, + destination: RewardDestination, +) -> Result<(T::AccountId, T::AccountId), &'static str> { let stash = create_funded_user::("stash", n, balance_factor); let controller = create_funded_user::("controller", n, balance_factor); let controller_lookup: ::Source = T::Lookup::unlookup(controller.clone()); - let reward_destination = RewardDestination::Staked; let amount = T::Currency::minimum_balance() * (balance_factor / 10).max(1).into(); - Staking::::bond(RawOrigin::Signed(stash.clone()).into(), controller_lookup, amount, reward_destination)?; + Staking::::bond(RawOrigin::Signed(stash.clone()).into(), controller_lookup, amount, destination)?; return Ok((stash, controller)) } /// Create a stash and controller pair, where the controller is dead, and payouts go to controller. /// This is used to test worst case payout scenarios. 
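The testing-utils changes above thread a `RewardDestination` parameter through the stash/controller helpers, and the pallet diff earlier adds a `RewardDestination::Account(_)` arm so rewards can be paid to an arbitrary account (exercised by `payout_to_any_account_works` further down). Below is a simplified, self-contained model of that destination routing; the types, balances map, and `make_payout` helper are stand-ins, not the pallet's real API.

```rust
use std::collections::HashMap;

type AccountId = u64;
type Balance = u64;

#[allow(dead_code)] // only the `Account` variant is constructed in this sketch
enum RewardDestination {
    Staked,             // add to stash and bond it
    Stash,              // add to stash without bonding
    Controller,         // pay the controller account
    Account(AccountId), // the new variant: pay an arbitrary account
}

fn make_payout(
    balances: &mut HashMap<AccountId, Balance>,
    stash: AccountId,
    controller: AccountId,
    dest: RewardDestination,
    amount: Balance,
) {
    let target = match dest {
        RewardDestination::Staked | RewardDestination::Stash => stash,
        RewardDestination::Controller => controller,
        RewardDestination::Account(who) => who,
    };
    *balances.entry(target).or_insert(0) += amount;
}

fn main() {
    let mut balances = HashMap::new();
    make_payout(&mut balances, 11, 10, RewardDestination::Account(42), 100);
    assert_eq!(balances[&42], 100);
    println!("paid arbitrary account: {:?}", balances);
}
```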
-pub fn create_stash_and_dead_controller(n: u32, balance_factor: u32) +pub fn create_stash_and_dead_controller( + n: u32, + balance_factor: u32, + destination: RewardDestination, +) -> Result<(T::AccountId, T::AccountId), &'static str> { let stash = create_funded_user::("stash", n, balance_factor); // controller has no funds let controller = create_funded_user::("controller", n, 0); let controller_lookup: ::Source = T::Lookup::unlookup(controller.clone()); - let reward_destination = RewardDestination::Controller; let amount = T::Currency::minimum_balance() * (balance_factor / 10).max(1).into(); - Staking::::bond(RawOrigin::Signed(stash.clone()).into(), controller_lookup, amount, reward_destination)?; + Staking::::bond(RawOrigin::Signed(stash.clone()).into(), controller_lookup, amount, destination)?; return Ok((stash, controller)) } @@ -73,7 +83,7 @@ pub fn create_validators( ) -> Result::Source>, &'static str> { let mut validators: Vec<::Source> = Vec::with_capacity(max as usize); for i in 0 .. max { - let (stash, controller) = create_stash_controller::(i, balance_factor)?; + let (stash, controller) = create_stash_controller::(i, balance_factor, RewardDestination::Staked)?; let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), }; @@ -110,7 +120,7 @@ pub fn create_validators_with_nominators_for_era( // Create validators for i in 0 .. validators { let balance_factor = if randomize_stake { rng.next_u32() % 255 + 10 } else { 100u32 }; - let (v_stash, v_controller) = create_stash_controller::(i, balance_factor)?; + let (v_stash, v_controller) = create_stash_controller::(i, balance_factor, RewardDestination::Staked)?; let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), }; @@ -128,6 +138,7 @@ pub fn create_validators_with_nominators_for_era( let (_n_stash, n_controller) = create_stash_controller::( u32::max_value() - j, balance_factor, + RewardDestination::Staked, )?; // Have them randomly validate diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 278e5323876724a035c3795576957b40136c6c1d..3feebfbc8ace9ffa18121cd5293f33ba6617a510 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -1734,7 +1734,7 @@ fn bond_with_duplicate_vote_should_be_ignored_by_npos_election() { .collect::>(), vec![(31, 1000), (21, 1000), (11, 1000)], ); - assert_eq!(>::iter().map(|(n, _)| n).collect::>(), vec![]); + assert!(>::iter().map(|(n, _)| n).collect::>().is_empty()); // give the man some money let initial_balance = 1000; @@ -1782,7 +1782,7 @@ fn bond_with_duplicate_vote_should_be_ignored_by_npos_election_elected() { .collect::>(), vec![(31, 100), (21, 1000), (11, 1000)], ); - assert_eq!(>::iter().map(|(n, _)| n).collect::>(), vec![]); + assert!(>::iter().map(|(n, _)| n).collect::>().is_empty()); // give the man some money let initial_balance = 1000; @@ -3615,7 +3615,7 @@ mod offchain_election { // A validator index which is out of bound ExtBuilder::default() .offchain_election_ext() - .validator_count(4) + .validator_count(2) .has_stakers(false) .build() .execute_with(|| { @@ -3627,7 +3627,7 @@ mod offchain_election { let (mut compact, winners, score) = prepare_submission_with(true, 2, |_| {}); // index 4 doesn't exist. - compact.votes1.push((3, 4)); + compact.votes1.iter_mut().for_each(|(_, vidx)| if *vidx == 1 { *vidx = 4 }); // The error type sadly cannot be more specific now. 
assert_noop!( @@ -3688,12 +3688,57 @@ mod offchain_election { assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); + let (compact, winners, score) = prepare_submission_with(true, 2, |a| { + // swap all 11 and 41s in the distribution with non-winners. Note that it is + // important that the count of winners and the count of unique targets remain + // valid. + a.iter_mut().for_each(| StakedAssignment { who, distribution } | + distribution.iter_mut().for_each(|(t, _)| { + if *t == 41 { *t = 31 } else { *t = 21 } + // if it is self vote, correct that. + if *who == 41 { *who = 31 } + if *who == 11 { *who = 21 } + }) + ); + }); + + assert_noop!( + submit_solution( + Origin::signed(10), + winners, + compact, + score, + ), + Error::::OffchainElectionBogusNomination, + ); + }) + } + + #[test] + fn offchain_election_unique_target_count_is_checked() { + // Number of unique targets and and winners.len must match. + ExtBuilder::default() + .offchain_election_ext() + .validator_count(2) // we select only 2. + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_election_test_ext(); + run_to_block(12); + + assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); + assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); + let (compact, winners, score) = prepare_submission_with(true, 2, |a| { a.iter_mut() .find(|x| x.who == 5) - // all 3 cannot be among the winners. Although, all of them are validator - // candidates. - .map(|x| x.distribution = vec![(21, 50), (41, 30), (31, 20)]); + // just add any new target. + .map(|x| { + // old value. + assert_eq!(x.distribution, vec![(41, 100)]); + // new value. + x.distribution = vec![(21, 50), (41, 50)] + }); }); assert_noop!( @@ -3703,7 +3748,7 @@ mod offchain_election { compact, score, ), - Error::::OffchainElectionBogusEdge, + Error::::OffchainElectionBogusWinnerCount, ); }) } @@ -4359,7 +4404,7 @@ fn test_payout_stakers() { // We also test that `payout_extra_nominators` works. ExtBuilder::default().has_stakers(false).build_and_execute(|| { let balance = 1000; - // Create three validators: + // Create a validator: bond_validator(11, 10, balance); // Default(64) // Create nominators, targeting stash of validators @@ -4597,15 +4642,12 @@ fn on_initialize_weight_is_correct() { }); } - #[test] fn payout_creates_controller() { - // Here we will test validator can set `max_nominators_payout` and it works. - // We also test that `payout_extra_nominators` works. 
ExtBuilder::default().has_stakers(false).build_and_execute(|| { let balance = 1000; - // Create three validators: - bond_validator(11, 10, balance); // Default(64) + // Create a validator: + bond_validator(11, 10, balance); // Create a stash/controller pair bond_nominator(1234, 1337, 100, vec![11]); @@ -4626,3 +4668,32 @@ fn payout_creates_controller() { assert!(Balances::free_balance(1337) > 0); }) } + +#[test] +fn payout_to_any_account_works() { + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + let balance = 1000; + // Create a validator: + bond_validator(11, 10, balance); // Default(64) + + // Create a stash/controller pair + bond_nominator(1234, 1337, 100, vec![11]); + + // Update payout location + assert_ok!(Staking::set_payee(Origin::signed(1337), RewardDestination::Account(42))); + + // Reward Destination account doesn't exist + assert_eq!(Balances::free_balance(42), 0); + + mock::start_era(1); + Staking::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + mock::start_era(2); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); + + // Payment is successful + assert!(Balances::free_balance(42) > 0); + }) +} diff --git a/frame/sudo/Cargo.toml b/frame/sudo/Cargo.toml index ba68aa49470fd4d71c33d8f62f7274ad9d256b2c..4713baea518f96ab210b74b1f79f0bcdb4cb956a 100644 --- a/frame/sudo/Cargo.toml +++ b/frame/sudo/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-sudo" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for sudo" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,14 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/sudo/README.md b/frame/sudo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..233727ac1bd28b819d0b4b724e239ba3d6e8a9a2 --- /dev/null +++ b/frame/sudo/README.md @@ -0,0 +1,70 @@ +# Sudo Module + +- 
[`sudo::Trait`](https://docs.rs/pallet-sudo/latest/pallet_sudo/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-sudo/latest/pallet_sudo/enum.Call.html) + +## Overview + +The Sudo module allows for a single account (called the "sudo key") +to execute dispatchable functions that require a `Root` call +or designate a new account to replace them as the sudo key. +Only one account can be the sudo key at a time. + +## Interface + +### Dispatchable Functions + +Only the sudo key can call the dispatchable functions from the Sudo module. + +* `sudo` - Make a `Root` call to a dispatchable function. +* `set_key` - Assign a new account to be the sudo key. + +## Usage + +### Executing Privileged Functions + +The Sudo module itself is not intended to be used within other modules. +Instead, you can build "privileged functions" (i.e. functions that require `Root` origin) in other modules. +You can execute these privileged functions by calling `sudo` with the sudo key account. +Privileged functions cannot be directly executed via an extrinsic. + +Learn more about privileged functions and `Root` origin in the [`Origin`] type documentation. + +### Simple Code Snippet + +This is an example of a module that exposes a privileged function: + +```rust +use frame_support::{decl_module, dispatch}; +use frame_system::ensure_root; + +pub trait Trait: frame_system::Trait {} + +decl_module! { + pub struct Module for enum Call where origin: T::Origin { + #[weight = 0] + pub fn privileged_function(origin) -> dispatch::DispatchResult { + ensure_root(origin)?; + + // do something... + + Ok(()) + } + } +} +``` + +## Genesis Config + +The Sudo module depends on the [`GenesisConfig`](https://docs.rs/pallet-sudo/latest/pallet_sudo/struct.GenesisConfig.html). +You need to set an initial superuser account as the sudo `key`. + +## Related Modules + +* [Democracy](https://docs.rs/pallet-democracy/latest/pallet_democracy/) + +[`Call`]: ./enum.Call.html +[`Trait`]: ./trait.Trait.html +[`Origin`]: https://docs.substrate.dev/docs/substrate-types + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 113fa0dccc6c7d3db79a5be8e8f4058e126fc794..83e73d2ce434972ceb8497624340a1fabca6b072 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -225,11 +225,11 @@ decl_module! { decl_event!( pub enum Event where AccountId = ::AccountId { - /// A sudo just took place. [result] + /// A sudo just took place. \[result\] Sudid(DispatchResult), - /// The [sudoer] just switched identity; the old key is supplied. + /// The \[sudoer\] just switched identity; the old key is supplied. KeyChanged(AccountId), - /// A sudo just took place. [result] + /// A sudo just took place. \[result\] SudoAsDone(bool), } ); diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index 5052d9c52d1b4245911fac94d212bf5e54ba9f54..7996cd05d071faee321cff2bd001bbff8350ebed 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -139,7 +139,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); diff --git a/frame/sudo/src/tests.rs b/frame/sudo/src/tests.rs index 79424d2824f9026d43d3ec04332896f545ba7eb2..cba1e1cf605404fb752ef1bcf0a207b13b2991b3 100644 --- a/frame/sudo/src/tests.rs +++ b/frame/sudo/src/tests.rs @@ -18,9 +18,9 @@ //! Tests for the module. 
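The Sudo README added above shows how a pallet exposes a `Root`-only function; as a complement to the tests that follow, here is a hypothetical, FRAME-free sketch of the gate the module itself provides: a call is re-dispatched with a root origin only when the signer matches the stored sudo key.

```rust
// Simplified stand-ins for the real FRAME origin and call machinery.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Origin {
    Root,
    Signed(u64),
}

struct Sudo {
    key: u64,
}

impl Sudo {
    /// Dispatch `call` as Root if and only if `origin` is the sudo key.
    fn sudo<R>(
        &self,
        origin: Origin,
        call: impl FnOnce(Origin) -> R,
    ) -> Result<R, &'static str> {
        match origin {
            Origin::Signed(who) if who == self.key => Ok(call(Origin::Root)),
            _ => Err("RequireSudo"),
        }
    }
}

fn privileged_function(origin: Origin) -> Result<(), &'static str> {
    if origin != Origin::Root {
        return Err("BadOrigin");
    }
    // do something privileged...
    Ok(())
}

fn main() {
    let sudo = Sudo { key: 1 };
    assert!(sudo.sudo(Origin::Signed(1), privileged_function).is_ok());
    assert!(sudo.sudo(Origin::Signed(2), privileged_function).is_err());
    println!("sudo gate ok");
}
```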
use super::*; -use mock::{ +use mock::{ Sudo, SudoCall, Origin, Call, Test, new_test_ext, LoggerCall, Logger, System, TestEvent, -}; +}; use frame_support::{assert_ok, assert_noop}; #[test] @@ -28,8 +28,8 @@ fn test_setup_works() { // Environment setup, logger storage, and sudo `key` retrieval should work as expected. new_test_ext(1).execute_with(|| { assert_eq!(Sudo::key(), 1u64); - assert_eq!(Logger::i32_log(), vec![]); - assert_eq!(Logger::account_log(), vec![]); + assert!(Logger::i32_log().is_empty()); + assert!(Logger::account_log().is_empty()); }); } @@ -40,8 +40,8 @@ fn sudo_basics() { // A privileged function should work when `sudo` is passed the root `key` as `origin`. let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1_000))); assert_ok!(Sudo::sudo(Origin::signed(1), call)); - assert_eq!(Logger::i32_log(), vec![42i32]); - + assert_eq!(Logger::i32_log(), vec![42i32]); + // A privileged function should not work when `sudo` is passed a non-root `key` as `origin`. let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1_000))); assert_noop!(Sudo::sudo(Origin::signed(2), call), Error::::RequireSudo); @@ -58,7 +58,7 @@ fn sudo_emits_events_correctly() { let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1))); assert_ok!(Sudo::sudo(Origin::signed(1), call)); let expected_event = TestEvent::sudo(RawEvent::Sudid(Ok(()))); - assert!(System::events().iter().any(|a| a.event == expected_event)); + assert!(System::events().iter().any(|a| a.event == expected_event)); }) } @@ -68,16 +68,16 @@ fn sudo_unchecked_weight_basics() { // A privileged function should work when `sudo` is passed the root `key` as origin. let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1_000))); assert_ok!(Sudo::sudo_unchecked_weight(Origin::signed(1), call, 1_000)); - assert_eq!(Logger::i32_log(), vec![42i32]); + assert_eq!(Logger::i32_log(), vec![42i32]); // A privileged function should not work when called with a non-root `key`. let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1_000))); assert_noop!( - Sudo::sudo_unchecked_weight(Origin::signed(2), call, 1_000), + Sudo::sudo_unchecked_weight(Origin::signed(2), call, 1_000), Error::::RequireSudo, ); // `I32Log` is unchanged after unsuccessful call. - assert_eq!(Logger::i32_log(), vec![42i32]); + assert_eq!(Logger::i32_log(), vec![42i32]); // Controls the dispatched weight. let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1))); @@ -103,7 +103,7 @@ fn sudo_unchecked_weight_emits_events_correctly() { #[test] fn set_key_basics() { - new_test_ext(1).execute_with(|| { + new_test_ext(1).execute_with(|| { // A root `key` can change the root `key` assert_ok!(Sudo::set_key(Origin::signed(1), 2)); assert_eq!(Sudo::key(), 2u64); @@ -117,7 +117,7 @@ fn set_key_basics() { #[test] fn set_key_emits_events_correctly() { - new_test_ext(1).execute_with(|| { + new_test_ext(1).execute_with(|| { // Set block number to 1 because events are not emitted on block 0. System::set_block_number(1); @@ -138,8 +138,8 @@ fn sudo_as_basics() { // A privileged function will not work when passed to `sudo_as`. let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1_000))); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); - assert_eq!(Logger::i32_log(), vec![]); - assert_eq!(Logger::account_log(), vec![]); + assert!(Logger::i32_log().is_empty()); + assert!(Logger::account_log().is_empty()); // A non-privileged function should not work when called with a non-root `key`. 
let call = Box::new(Call::Logger(LoggerCall::non_privileged_log(42, 1))); @@ -156,7 +156,7 @@ fn sudo_as_basics() { #[test] fn sudo_as_emits_events_correctly() { - new_test_ext(1).execute_with(|| { + new_test_ext(1).execute_with(|| { // Set block number to 1 because events are not emitted on block 0. System::set_block_number(1); diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 24e898e1692fab89063dbe2b5f0896e58550324e..d082b718262b8c9aebdd19833a3ca21dee7a5bbe 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "frame-support" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Support code for the runtime." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,25 +16,25 @@ targets = ["x86_64-unknown-linux-gnu"] log = "0.4" serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -frame-metadata = { version = "11.0.0-rc5", default-features = false, path = "../metadata" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/tracing" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-arithmetic = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/arithmetic" } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -frame-support-procedural = { version = "2.0.0-rc5", path = "./procedural" } +frame-metadata = { version = "12.0.0", default-features = false, path = "../metadata" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-tracing = { version = "2.0.0", default-features = false, path = "../../primitives/tracing" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-arithmetic = { version = "2.0.0", default-features = false, path = "../../primitives/arithmetic" } +sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } +frame-support-procedural = { version = "2.0.0", path = "./procedural" } paste = "0.1.6" once_cell = { version = "1", default-features = false, optional = true } -sp-state-machine = { version = "0.8.0-rc5", optional = true, path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8.0", optional = true, path = "../../primitives/state-machine" } bitmask = { version = "0.5.0", default-features = false } impl-trait-for-tuples = "0.1.3" smallvec = "1.4.1" [dev-dependencies] pretty_assertions = "0.6.1" -frame-system = { version = "2.0.0-rc5", path = "../system" } +frame-system = { version = "2.0.0", path = "../system" } parity-util-mem = { version = "0.7.0", features = ["primitive-types"] } [features] diff --git 
a/frame/support/README.md b/frame/support/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2282870aca05ca8393e5d6bef1ddb059e0466a72 --- /dev/null +++ b/frame/support/README.md @@ -0,0 +1,3 @@ +Support code for the runtime. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index 622b92466546483f941ec77d089a5bfcda955168..24d166b75c396b2e1bfc8ea6ff3ffb1d65c62bdc 100644 --- a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -frame-support-procedural-tools = { version = "2.0.0-rc5", path = "./tools" } +frame-support-procedural-tools = { version = "2.0.0", path = "./tools" } proc-macro2 = "1.0.6" quote = "1.0.3" syn = { version = "1.0.7", features = ["full"] } diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 57827b0673916da0b0002e6546f3ce46f8f90b80..c51582fdd05a16014ea1c5c61c0eb26b5b9b2427 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -19,15 +19,86 @@ mod parse; use frame_support_procedural_tools::syn_ext as ext; use frame_support_procedural_tools::{generate_crate_access, generate_hidden_includes}; -use parse::{ModuleDeclaration, RuntimeDefinition, WhereSection}; +use parse::{ModuleDeclaration, RuntimeDefinition, WhereSection, ModulePart}; use proc_macro::TokenStream; -use proc_macro2::TokenStream as TokenStream2; +use proc_macro2::{TokenStream as TokenStream2}; use quote::quote; use syn::{Ident, Result, TypePath}; +use std::collections::HashMap; /// The fixed name of the system module. const SYSTEM_MODULE_NAME: &str = "System"; +/// The complete definition of a module with the resulting fixed index. +#[derive(Debug, Clone)] +pub struct Module { + pub name: Ident, + pub index: u8, + pub module: Ident, + pub instance: Option, + pub module_parts: Vec, +} + +impl Module { + /// Get resolved module parts + fn module_parts(&self) -> &[ModulePart] { + &self.module_parts + } + + /// Find matching parts + fn find_part(&self, name: &str) -> Option<&ModulePart> { + self.module_parts.iter().find(|part| part.name() == name) + } + + /// Return whether module contains part + fn exists_part(&self, name: &str) -> bool { + self.find_part(name).is_some() + } +} + +/// Convert from the parsed module to their final information. +/// Assign index to each modules using same rules as rust for fieldless enum. +/// I.e. implicit are assigned number incrementedly from last explicit or 0. 
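The comment above states the index-resolution rule that `complete_modules` (next in the diff) implements: an explicit `= n` fixes a module's index, each implicit declaration takes the previous index plus one (starting from 0), and duplicate indices are rejected. A standalone sketch of just that rule, using hypothetical module names rather than the macro's parsed `ModuleDeclaration`s:

```rust
use std::collections::HashMap;

fn complete_indices<'a>(
    decls: &[(&'a str, Option<u8>)],
) -> Result<Vec<(&'a str, u8)>, String> {
    let mut last: Option<u8> = None;
    let mut used = HashMap::new();
    let mut out = Vec::new();

    for (name, explicit) in decls {
        let index = match explicit {
            Some(i) => *i,
            // Implicit: previous index + 1, or 0 if this is the first module.
            None => last
                .map_or(Some(0), |i| i.checked_add(1))
                .ok_or_else(|| format!("index for `{}` does not fit into u8", name))?,
        };
        last = Some(index);
        if let Some(other) = used.insert(index, *name) {
            return Err(format!("modules `{}` and `{}` both use index {}", other, name, index));
        }
        out.push((*name, index));
    }
    Ok(out)
}

fn main() {
    let resolved = complete_indices(&[
        ("System", Some(0)),
        ("Timestamp", None),   // -> 1
        ("Balances", Some(10)),
        ("Staking", None),     // -> 11
    ])
    .unwrap();
    assert_eq!(
        resolved,
        vec![("System", 0), ("Timestamp", 1), ("Balances", 10), ("Staking", 11)]
    );
    println!("{:?}", resolved);
}
```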
+fn complete_modules(decl: impl Iterator) -> syn::Result> { + let mut indices = HashMap::new(); + let mut last_index: Option = None; + + decl + .map(|module| { + let final_index = match module.index { + Some(i) => i, + None => last_index.map_or(Some(0), |i| i.checked_add(1)) + .ok_or_else(|| { + let msg = "Module index doesn't fit into u8, index is 256"; + syn::Error::new(module.name.span(), msg) + })?, + }; + + last_index = Some(final_index); + + if let Some(used_module) = indices.insert(final_index, module.name.clone()) { + let msg = format!( + "Module indices are conflicting: Both modules {} and {} are at index {}", + used_module, + module.name, + final_index, + ); + let mut err = syn::Error::new(used_module.span(), &msg); + err.combine(syn::Error::new(module.name.span(), msg)); + return Err(err); + } + + Ok(Module { + name: module.name, + index: final_index, + module: module.module, + instance: module.instance, + module_parts: module.module_parts, + }) + }) + .collect() +} + pub fn construct_runtime(input: TokenStream) -> TokenStream { let definition = syn::parse_macro_input!(input as RuntimeDefinition); construct_runtime_parsed(definition) @@ -52,17 +123,16 @@ fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result sm, - None => { - return Err(syn::Error::new( - modules_token.span, - "`System` module declaration is missing. \ - Please add this line: `System: frame_system::{Module, Call, Storage, Config, Event},`", - )) - } - }; + let modules = complete_modules(modules.into_iter())?; + + let system_module = modules.iter() + .find(|decl| decl.name == SYSTEM_MODULE_NAME) + .ok_or_else(|| syn::Error::new( + modules_token.span, + "`System` module declaration is missing. \ + Please add this line: `System: frame_system::{Module, Call, Storage, Config, Event},`", + ))?; + let hidden_crate_name = "construct_runtime"; let scrate = generate_crate_access(&hidden_crate_name, "frame-support"); let scrate_decl = generate_hidden_includes(&hidden_crate_name, "frame-support"); @@ -77,12 +147,12 @@ fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result Result( runtime: &'a Ident, - module_declarations: impl Iterator, + module_declarations: impl Iterator, scrate: &'a TokenStream2, ) -> TokenStream2 { let modules_tokens = module_declarations @@ -152,7 +222,7 @@ fn decl_validate_unsigned<'a>( fn decl_outer_inherent<'a>( block: &'a syn::TypePath, unchecked_extrinsic: &'a syn::TypePath, - module_declarations: impl Iterator, + module_declarations: impl Iterator, scrate: &'a TokenStream2, ) -> TokenStream2 { let modules_tokens = module_declarations.filter_map(|module_declaration| { @@ -176,7 +246,7 @@ fn decl_outer_inherent<'a>( fn decl_outer_config<'a>( runtime: &'a Ident, - module_declarations: impl Iterator, + module_declarations: impl Iterator, scrate: &'a TokenStream2, ) -> TokenStream2 { let modules_tokens = module_declarations @@ -214,7 +284,7 @@ fn decl_outer_config<'a>( fn decl_runtime_metadata<'a>( runtime: &'a Ident, - module_declarations: impl Iterator, + module_declarations: impl Iterator, scrate: &'a TokenStream2, extrinsic: &TypePath, ) -> TokenStream2 { @@ -238,7 +308,12 @@ fn decl_runtime_metadata<'a>( .as_ref() .map(|name| quote!(<#name>)) .into_iter(); - quote!(#module::Module #(#instance)* as #name with #(#filtered_names)* ,) + + let index = module_declaration.index; + + quote!( + #module::Module #(#instance)* as #name { index #index } with #(#filtered_names)*, + ) }); quote!( #scrate::impl_runtime_metadata!{ @@ -250,7 +325,7 @@ fn decl_runtime_metadata<'a>( fn 
decl_outer_dispatch<'a>( runtime: &'a Ident, - module_declarations: impl Iterator, + module_declarations: impl Iterator, scrate: &'a TokenStream2, ) -> TokenStream2 { let modules_tokens = module_declarations @@ -258,8 +333,10 @@ fn decl_outer_dispatch<'a>( .map(|module_declaration| { let module = &module_declaration.module; let name = &module_declaration.name; - quote!(#module::#name) + let index = module_declaration.index.to_string(); + quote!(#[codec(index = #index)] #module::#name) }); + quote!( #scrate::impl_outer_dispatch! { pub enum Call for #runtime where origin: Origin { @@ -271,12 +348,12 @@ fn decl_outer_dispatch<'a>( fn decl_outer_origin<'a>( runtime_name: &'a Ident, - module_declarations: impl Iterator, - system_name: &'a Ident, + modules_except_system: impl Iterator, + system_module: &'a Module, scrate: &'a TokenStream2, ) -> syn::Result { let mut modules_tokens = TokenStream2::new(); - for module_declaration in module_declarations { + for module_declaration in modules_except_system { match module_declaration.find_part("Origin") { Some(module_entry) => { let module = &module_declaration.module; @@ -290,16 +367,23 @@ fn decl_outer_origin<'a>( ); return Err(syn::Error::new(module_declaration.name.span(), msg)); } - let tokens = quote!(#module #instance #generics ,); + let index = module_declaration.index.to_string(); + let tokens = quote!(#[codec(index = #index)] #module #instance #generics,); modules_tokens.extend(tokens); } None => {} } } + let system_name = &system_module.module; + let system_index = system_module.index.to_string(); + Ok(quote!( #scrate::impl_outer_origin! { - pub enum Origin for #runtime_name where system = #system_name { + pub enum Origin for #runtime_name where + system = #system_name, + system_index = #system_index + { #modules_tokens } } @@ -308,7 +392,7 @@ fn decl_outer_origin<'a>( fn decl_outer_event<'a>( runtime_name: &'a Ident, - module_declarations: impl Iterator, + module_declarations: impl Iterator, scrate: &'a TokenStream2, ) -> syn::Result { let mut modules_tokens = TokenStream2::new(); @@ -326,7 +410,9 @@ fn decl_outer_event<'a>( ); return Err(syn::Error::new(module_declaration.name.span(), msg)); } - let tokens = quote!(#module #instance #generics ,); + + let index = module_declaration.index.to_string(); + let tokens = quote!(#[codec(index = #index)] #module #instance #generics,); modules_tokens.extend(tokens); } None => {} @@ -344,7 +430,7 @@ fn decl_outer_event<'a>( fn decl_all_modules<'a>( runtime: &'a Ident, - module_declarations: impl Iterator, + module_declarations: impl Iterator, ) -> TokenStream2 { let mut types = TokenStream2::new(); let mut names = Vec::new(); @@ -376,22 +462,24 @@ fn decl_all_modules<'a>( ) } -fn decl_module_to_index<'a>( - module_declarations: impl Iterator, - num_modules: usize, +fn decl_pallet_runtime_setup( + module_declarations: &[Module], scrate: &TokenStream2, ) -> TokenStream2 { - let names = module_declarations.map(|d| &d.name); - let indices = 0..num_modules; + let names = module_declarations.iter().map(|d| &d.name); + let names2 = module_declarations.iter().map(|d| &d.name); + let name_strings = module_declarations.iter().map(|d| d.name.to_string()); + let indices = module_declarations.iter() + .map(|module| module.index as usize); quote!( - /// Provides an implementation of `ModuleToIndex` to map a module - /// to its index in the runtime. - pub struct ModuleToIndex; + /// Provides an implementation of `PalletInfo` to provide information + /// about the pallet setup in the runtime. 
+ pub struct PalletInfo; - impl #scrate::traits::ModuleToIndex for ModuleToIndex { - fn module_to_index() -> Option { - let type_id = #scrate::sp_std::any::TypeId::of::(); + impl #scrate::traits::PalletInfo for PalletInfo { + fn index() -> Option { + let type_id = #scrate::sp_std::any::TypeId::of::();
#( if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { return Some(#indices) @@ -400,18 +488,21 @@ fn decl_module_to_index<'a>( None } + + fn name() -> Option<&'static str> { + let type_id = #scrate::sp_std::any::TypeId::of::
(); + #( + if type_id == #scrate::sp_std::any::TypeId::of::<#names2>() { + return Some(#name_strings) + } + )* + + None + } } ) } -fn find_system_module<'a>( - mut module_declarations: impl Iterator, -) -> Option<&'a Ident> { - module_declarations - .find(|decl| decl.name == SYSTEM_MODULE_NAME) - .map(|decl| &decl.module) -} - fn decl_integrity_test(scrate: &TokenStream2) -> TokenStream2 { quote!( #[cfg(test)] diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index c8481480baac5e452b8294fec85bc367c59244ab..4a45044d67f25f8f75481471072a93b7200360c8 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -149,9 +149,11 @@ impl Parse for WhereDefinition { } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ModuleDeclaration { pub name: Ident, + /// Optional fixed index (e.g. `MyPallet ... = 3,`) + pub index: Option, pub module: Ident, pub instance: Option, pub module_parts: Vec, @@ -175,32 +177,27 @@ impl Parse for ModuleDeclaration { let _: Token![::] = input.parse()?; let module_parts = parse_module_parts(input)?; + let index = if input.peek(Token![=]) { + input.parse::()?; + let index = input.parse::()?; + let index = index.base10_parse::()?; + Some(index) + } else { + None + }; + let parsed = Self { name, module, instance, module_parts, + index, }; Ok(parsed) } } -impl ModuleDeclaration { - /// Get resolved module parts - pub fn module_parts(&self) -> &[ModulePart] { - &self.module_parts - } - - pub fn find_part(&self, name: &str) -> Option<&ModulePart> { - self.module_parts.iter().find(|part| part.name() == name) - } - - pub fn exists_part(&self, name: &str) -> bool { - self.find_part(name).is_some() - } -} - /// Parse [`ModulePart`]'s from a braces enclosed list that is split by commas, e.g. /// /// `{ Call, Event }` diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index 054d90d7bbaebfbc0324c523f6f048a56b3d6bef..bf87bd552fd0663963c766f5333bbe559fee9607 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -15,9 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// tag::description[] //! Proc macro of Support code for the runtime. -// end::description[] #![recursion_limit="512"] @@ -254,13 +252,13 @@ pub fn decl_storage(input: TokenStream) -> TokenStream { /// NodeBlock = runtime::Block, /// UncheckedExtrinsic = UncheckedExtrinsic /// { -/// System: system::{Module, Call, Event, Config}, -/// Test: test::{Module, Call}, -/// Test2: test_with_long_module::{Module}, +/// System: system::{Module, Call, Event, Config} = 0, +/// Test: test::{Module, Call} = 1, +/// Test2: test_with_long_module::{Module, Event}, /// /// // Module with instances /// Test3_Instance1: test3::::{Module, Call, Storage, Event, Config, Origin}, -/// Test3_DefaultInstance: test3::{Module, Call, Storage, Event, Config, Origin}, +/// Test3_DefaultInstance: test3::{Module, Call, Storage, Event, Config, Origin} = 4, /// } /// ) /// ``` @@ -281,6 +279,18 @@ pub fn decl_storage(input: TokenStream) -> TokenStream { /// - `Inherent` - If the module provides/can check inherents. /// - `ValidateUnsigned` - If the module validates unsigned extrinsics. /// +/// `= $n` is an optional part allowing to define at which index the module variants in +/// `OriginCaller`, `Call` and `Event` are encoded, and to define the ModuleToIndex value. 
+/// +/// if `= $n` is not given, then index is resolved same as fieldless enum in Rust +/// (i.e. incrementedly from previous index): +/// ```nocompile +/// module1 .. = 2, +/// module2 .., // Here module2 is given index 3 +/// module3 .. = 0, +/// module4 .., // Here module4 is given index 1 +/// ``` +/// /// # Note /// /// The population of the genesis storage depends on the order of modules. So, if one of your @@ -296,7 +306,7 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { /// The return type of the annotated function must be `Result`. All changes to storage performed /// by the annotated function are discarded if it returns `Err`, or committed if `Ok`. /// -/// #Example +/// # Example /// /// ```nocompile /// #[transactional] @@ -313,5 +323,5 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { /// ``` #[proc_macro_attribute] pub fn transactional(attr: TokenStream, input: TokenStream) -> TokenStream { - transactional::transactional(attr, input) + transactional::transactional(attr, input).unwrap_or_else(|e| e.to_compile_error().into()) } diff --git a/frame/support/procedural/src/storage/mod.rs b/frame/support/procedural/src/storage/mod.rs index b42639c30c5b9b8a05e4fb383e19d7d64abc8861..0aa0a3cad7cd1e1ec8b55f57781ffa2bf7265a51 100644 --- a/frame/support/procedural/src/storage/mod.rs +++ b/frame/support/procedural/src/storage/mod.rs @@ -416,6 +416,8 @@ pub fn decl_storage_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStr StorageMap as _, StorageDoubleMap as _, StoragePrefixedMap as _, + IterableStorageMap as _, + IterableStorageDoubleMap as _, }; #scrate_decl diff --git a/frame/support/procedural/src/transactional.rs b/frame/support/procedural/src/transactional.rs index a001f44c4d482ba9d2df14a5308a7a5a0b0091b7..fbd0c9ca0b3c4a4af8d9364d4c760c5cde68f8f4 100644 --- a/frame/support/procedural/src/transactional.rs +++ b/frame/support/procedural/src/transactional.rs @@ -17,15 +17,17 @@ use proc_macro::TokenStream; use quote::quote; -use syn::{parse_macro_input, ItemFn}; +use syn::{ItemFn, Result}; +use frame_support_procedural_tools::generate_crate_access_2018; -pub fn transactional(_attr: TokenStream, input: TokenStream) -> TokenStream { - let ItemFn { attrs, vis, sig, block } = parse_macro_input!(input as ItemFn); +pub fn transactional(_attr: TokenStream, input: TokenStream) -> Result { + let ItemFn { attrs, vis, sig, block } = syn::parse(input)?; + let crate_ = generate_crate_access_2018()?; let output = quote! 
{ #(#attrs)* - #vis #sig { - use frame_support::storage::{with_transaction, TransactionOutcome}; + #vis #sig { + use #crate_::storage::{with_transaction, TransactionOutcome}; with_transaction(|| { let r = #block; if r.is_ok() { @@ -34,7 +36,8 @@ pub fn transactional(_attr: TokenStream, input: TokenStream) -> TokenStream { TransactionOutcome::Rollback(r) } }) - } - }; - output.into() + } + }; + + Ok(output.into()) } diff --git a/frame/support/procedural/tools/Cargo.toml b/frame/support/procedural/tools/Cargo.toml index 2c9a66baac51d4d1bf877a075f804ca29bf38b55..2cff2473b85d4415386e137695f7fc73a9ba8347 100644 --- a/frame/support/procedural/tools/Cargo.toml +++ b/frame/support/procedural/tools/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural-tools" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,7 +12,7 @@ description = "Proc macro helpers for procedural macros" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -frame-support-procedural-tools-derive = { version = "2.0.0-rc5", path = "./derive" } +frame-support-procedural-tools-derive = { version = "2.0.0", path = "./derive" } proc-macro2 = "1.0.6" quote = "1.0.3" syn = { version = "1.0.7", features = ["full", "visit"] } diff --git a/frame/support/procedural/tools/derive/Cargo.toml b/frame/support/procedural/tools/derive/Cargo.toml index fceaeaf08d2f1369a1ab518fc699e3baa42b9c01..b616dd790d61e938d490da610ee87e678c115ba7 100644 --- a/frame/support/procedural/tools/derive/Cargo.toml +++ b/frame/support/procedural/tools/derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural-tools-derive" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/frame/support/procedural/tools/src/lib.rs b/frame/support/procedural/tools/src/lib.rs index 0033787a7c0450629c693037f73fd43e37d76504..c5a27c809aff8edcbe5329da2bfe2e15473abeff 100644 --- a/frame/support/procedural/tools/src/lib.rs +++ b/frame/support/procedural/tools/src/lib.rs @@ -46,6 +46,25 @@ pub fn generate_crate_access(unique_id: &str, def_crate: &str) -> TokenStream { } } +/// Generate the crate access for the `frame-support` crate using 2018 syntax. +/// +/// Output will for example be `frame_support`. +pub fn generate_crate_access_2018() -> Result { + if std::env::var("CARGO_PKG_NAME").unwrap() == "frame-support" { + Ok(quote::quote!( frame_support )) + } else { + match crate_name("frame-support") { + Ok(name) => { + let name = Ident::new(&name, Span::call_site()); + Ok(quote!( #name )) + }, + Err(e) => { + Err(Error::new(Span::call_site(), &e)) + } + } + } +} + /// Generates the hidden includes that are required to make the macro independent from its scope. pub fn generate_hidden_includes(unique_id: &str, def_crate: &str) -> TokenStream { if std::env::var("CARGO_PKG_NAME").unwrap() == def_crate { diff --git a/frame/support/src/debug.rs b/frame/support/src/debug.rs index e4a48068460c6cc038577d7d8eae9304617094fa..86b40f1664dcf9e37e4395e35a00bc81efa342b7 100644 --- a/frame/support/src/debug.rs +++ b/frame/support/src/debug.rs @@ -87,11 +87,11 @@ //! native::print!("My struct: {:?}", x); //! ``` -use sp_std::vec::Vec; use sp_std::fmt::{self, Debug}; pub use log::{info, debug, error, trace, warn}; pub use crate::runtime_print as print; +pub use sp_std::Writer; /// Native-only logging. /// @@ -132,9 +132,9 @@ macro_rules! 
runtime_print { ($($arg:tt)+) => { { use core::fmt::Write; - let mut w = $crate::debug::Writer::default(); + let mut w = $crate::sp_std::Writer::default(); let _ = core::write!(&mut w, $($arg)+); - w.print(); + sp_io::misc::print_utf8(&w.inner()) } } } @@ -144,24 +144,6 @@ pub fn debug(data: &impl Debug) { runtime_print!("{:?}", data); } -/// A target for `core::write!` macro - constructs a string in memory. -#[derive(Default)] -pub struct Writer(Vec); - -impl fmt::Write for Writer { - fn write_str(&mut self, s: &str) -> fmt::Result { - self.0.extend(s.as_bytes()); - Ok(()) - } -} - -impl Writer { - /// Print the content of this `Writer` out. - pub fn print(&self) { - sp_io::misc::print_utf8(&self.0) - } -} - /// Runtime logger implementation - `log` crate backend. /// /// The logger should be initialized if you want to display @@ -204,13 +186,13 @@ impl log::Log for RuntimeLogger { fn log(&self, record: &log::Record) { use fmt::Write; - let mut w = Writer::default(); + let mut w = sp_std::Writer::default(); let _ = core::write!(&mut w, "{}", record.args()); sp_io::logging::log( record.level().into(), record.target(), - &w.0, + w.inner(), ); } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 442a99effadbc646903661c86b6a7e374241ba5d..02e03ec6e6d6df1e1bf261f788b1f951540fb9a8 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -106,7 +106,7 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// ### Shorthand Example /// /// The macro automatically expands a shorthand function declaration to return the -/// [`DispatchResult`](dispatch::DispatchResult) type. These functions are the same: +/// [`DispatchResult`] type. These functions are the same: /// /// ``` /// # #[macro_use] @@ -1265,43 +1265,46 @@ macro_rules! 
decl_module { }; (@impl_on_initialize + { $system:ident } $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } fn on_initialize() -> $return:ty { $( $impl:tt )* } ) => { - impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnInitialize<$trait_instance::BlockNumber> + impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnInitialize<<$trait_instance as $system::Trait>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { - fn on_initialize(_block_number_not_used: $trait_instance::BlockNumber) -> $return { - $crate::sp_tracing::enter_span!("on_initialize"); + fn on_initialize(_block_number_not_used: <$trait_instance as $system::Trait>::BlockNumber) -> $return { + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_initialize")); { $( $impl )* } } } }; (@impl_on_initialize + { $system:ident } $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } fn on_initialize($param:ident : $param_ty:ty) -> $return:ty { $( $impl:tt )* } ) => { - impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnInitialize<$trait_instance::BlockNumber> + impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnInitialize<<$trait_instance as $system::Trait>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn on_initialize($param: $param_ty) -> $return { - $crate::sp_tracing::enter_span!("on_initialize"); + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_initialize")); { $( $impl )* } } } }; (@impl_on_initialize + { $system:ident } $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } ) => { - impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnInitialize<$trait_instance::BlockNumber> + impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnInitialize<<$trait_instance as $system::Trait>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* {} }; @@ -1316,7 +1319,7 @@ macro_rules! decl_module { for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn on_runtime_upgrade() -> $return { - $crate::sp_tracing::enter_span!("on_runtime_upgrade"); + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_runtime_upgrade")); { $( $impl )* } } } @@ -1362,68 +1365,73 @@ macro_rules! 
decl_module { }; (@impl_on_finalize + { $system:ident } $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } fn on_finalize() { $( $impl:tt )* } ) => { - impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnFinalize<$trait_instance::BlockNumber> + impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnFinalize<<$trait_instance as $system::Trait>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { - fn on_finalize(_block_number_not_used: $trait_instance::BlockNumber) { - $crate::sp_tracing::enter_span!("on_finalize"); + fn on_finalize(_block_number_not_used: <$trait_instance as $system::Trait>::BlockNumber) { + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_finalize")); { $( $impl )* } } } }; (@impl_on_finalize + { $system:ident } $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } fn on_finalize($param:ident : $param_ty:ty) { $( $impl:tt )* } ) => { - impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnFinalize<$trait_instance::BlockNumber> + impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnFinalize<<$trait_instance as $system::Trait>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn on_finalize($param: $param_ty) { - $crate::sp_tracing::enter_span!("on_finalize"); + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_finalize")); { $( $impl )* } } } }; (@impl_on_finalize + { $system:ident } $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } ) => { - impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnFinalize<$trait_instance::BlockNumber> + impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnFinalize<<$trait_instance as $system::Trait>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { } }; (@impl_offchain + { $system:ident } $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } fn offchain_worker() { $( $impl:tt )* } ) => { - impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::traits::OffchainWorker<$trait_instance::BlockNumber> + impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OffchainWorker<<$trait_instance as $system::Trait>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { - fn offchain_worker(_block_number_not_used: $trait_instance::BlockNumber) { $( $impl )* } + fn offchain_worker(_block_number_not_used: <$trait_instance as $system::Trait>::BlockNumber) { $( $impl )* } } }; (@impl_offchain + { $system:ident } $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } fn offchain_worker($param:ident : $param_ty:ty) { $( $impl:tt )* } ) => { - impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::traits::OffchainWorker<$trait_instance::BlockNumber> + impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> + 
$crate::traits::OffchainWorker<<$trait_instance as $system::Trait>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn offchain_worker($param: $param_ty) { $( $impl )* } @@ -1431,11 +1439,12 @@ macro_rules! decl_module { }; (@impl_offchain + { $system:ident } $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } ) => { - impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::traits::OffchainWorker<$trait_instance::BlockNumber> + impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OffchainWorker<<$trait_instance as $system::Trait>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* {} }; @@ -1456,7 +1465,7 @@ macro_rules! decl_module { $vis fn $name( $origin: $origin_ty $(, $param: $param_ty )* ) -> $crate::dispatch::DispatchResult { - $crate::sp_tracing::enter_span!(stringify!($name)); + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); { $( $impl )* } Ok(()) } @@ -1475,7 +1484,7 @@ macro_rules! decl_module { ) => { $(#[$fn_attr])* $vis fn $name($origin: $origin_ty $(, $param: $param_ty )* ) -> $result { - $crate::sp_tracing::enter_span!(stringify!($name)); + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); $( $impl )* } }; @@ -1635,6 +1644,7 @@ macro_rules! decl_module { $crate::decl_module! { @impl_on_initialize + { $system } $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; { $( $other_where_bounds )* } $( $on_initialize )* @@ -1649,6 +1659,7 @@ macro_rules! decl_module { $crate::decl_module! { @impl_on_finalize + { $system } $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; { $( $other_where_bounds )* } $( $on_finalize )* @@ -1656,6 +1667,7 @@ macro_rules! decl_module { $crate::decl_module! { @impl_offchain + { $system } $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; { $( $other_where_bounds )* } $( $offchain )* @@ -1899,7 +1911,7 @@ macro_rules! impl_outer_dispatch { $(#[$attr:meta])* pub enum $call_type:ident for $runtime:ident where origin: $origin:ty { $( - $module:ident::$camelcase:ident, + $( #[codec(index = $index:tt)] )? $module:ident::$camelcase:ident, )* } ) => { @@ -1912,6 +1924,7 @@ macro_rules! impl_outer_dispatch { )] pub enum $call_type { $( + $( #[codec(index = $index)] )? $camelcase ( $crate::dispatch::CallableCallFor<$camelcase, $runtime> ) ,)* } @@ -2055,6 +2068,7 @@ macro_rules! __dispatch_impl_metadata { where $( $other_where_bounds )* { #[doc(hidden)] + #[allow(dead_code)] pub fn call_functions() -> &'static [$crate::dispatch::FunctionMetadata] { $crate::__call_to_functions!($($rest)*) } @@ -2128,6 +2142,7 @@ macro_rules! 
__impl_module_constants_metadata { $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* { #[doc(hidden)] + #[allow(dead_code)] pub fn module_constants_metadata() -> &'static [$crate::dispatch::ModuleConstantMetadata] { // Create the `ByteGetter`s $( @@ -2345,9 +2360,7 @@ mod tests { IntegrityTest, }; - pub trait Trait: system::Trait + Sized where Self::AccountId: From { - type BlockNumber: Into; - } + pub trait Trait: system::Trait + Sized where Self::AccountId: From { } pub mod system { use codec::{Encode, Decode}; @@ -2357,6 +2370,7 @@ mod tests { type Call; type BaseCallFilter; type Origin: crate::traits::OriginTrait; + type BlockNumber: Into; } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] @@ -2480,10 +2494,7 @@ mod tests { ]; pub struct TraitImpl {} - - impl Trait for TraitImpl { - type BlockNumber = u32; - } + impl Trait for TraitImpl { } type Test = Module; @@ -2502,6 +2513,7 @@ mod tests { type AccountId = u32; type Call = OuterCall; type BaseCallFilter = (); + type BlockNumber = u32; } #[test] diff --git a/frame/support/src/error.rs b/frame/support/src/error.rs index d758ad52e72b3ba7fe10ceee4d978c7bd722887c..ef963f3c597336ee8c87fba37de939fec201043b 100644 --- a/frame/support/src/error.rs +++ b/frame/support/src/error.rs @@ -140,8 +140,8 @@ macro_rules! decl_error { for $crate::sp_runtime::DispatchError { fn from(err: $error<$generic $(, $inst_generic)?>) -> Self { - let index = <$generic::ModuleToIndex as $crate::traits::ModuleToIndex> - ::module_to_index::<$module<$generic $(, $inst_generic)?>>() + let index = <$generic::PalletInfo as $crate::traits::PalletInfo> + ::index::<$module<$generic $(, $inst_generic)?>>() .expect("Every active module has an index in the runtime; qed") as u8; $crate::sp_runtime::DispatchError::Module { diff --git a/frame/support/src/event.rs b/frame/support/src/event.rs index 1184b379f44469d72278dd1562223ef51631c6d4..0f889f97f40a09adb061877e9e266f0d0fc29fb8 100644 --- a/frame/support/src/event.rs +++ b/frame/support/src/event.rs @@ -346,7 +346,7 @@ macro_rules! impl_outer_event { $name; $runtime; Modules { $( $rest_events )* }; - ; + {}; ); }; // Generic + Instance @@ -355,17 +355,17 @@ macro_rules! impl_outer_event { $name:ident; $runtime:ident; Modules { - $module:ident $instance:ident, + $( #[codec(index = $index:tt)] )? $module:ident $instance:ident, $( $rest_event_generic_instance:tt )* }; - $( $module_name:ident::Event $( <$generic_param:ident> )? $( { $generic_instance:ident } )?, )*; + { $( $parsed:tt )* }; ) => { $crate::impl_outer_event!( $( #[$attr] )*; $name; $runtime; Modules { $( $rest_event_generic_instance )* }; - $( $module_name::Event $( <$generic_param> )? $( { $generic_instance } )?, )* $module::Event<$runtime>{ $instance },; + { $( $parsed )* $module::Event<$runtime>{ $instance } index { $( $index )? }, }; ); }; // Instance @@ -374,17 +374,17 @@ macro_rules! impl_outer_event { $name:ident; $runtime:ident; Modules { - $module:ident $instance:ident, + $( #[codec(index = $index:tt)] )? $module:ident $instance:ident, $( $rest_event_instance:tt )* }; - $( $module_name:ident::Event $( <$generic_param:ident> )? $( { $generic_instance:ident } )?, )*; + { $( $parsed:tt )* }; ) => { $crate::impl_outer_event!( $( #[$attr] )*; $name; $runtime; Modules { $( $rest_event_instance )* }; - $( $module_name::Event $( <$generic_param> )* $( { $generic_instance } )?, )* $module::Event { $instance },; + { $( $parsed )* $module::Event { $instance } index { $( $index )? }, }; ); }; // Generic @@ -393,17 +393,17 @@ macro_rules! 
impl_outer_event { $name:ident; $runtime:ident; Modules { - $module:ident, + $( #[codec(index = $index:tt)] )? $module:ident, $( $rest_event_generic:tt )* }; - $( $module_name:ident::Event $( <$generic_param:ident> )? $( { $generic_instance:ident } )?, )*; + { $( $parsed:tt )* }; ) => { $crate::impl_outer_event!( $( #[$attr] )*; $name; $runtime; Modules { $( $rest_event_generic )* }; - $( $module_name::Event $( <$generic_param> )? $( { $generic_instance } )?, )* $module::Event<$runtime>,; + { $( $parsed )* $module::Event<$runtime> index { $( $index )? }, }; ); }; // No Generic and no Instance @@ -412,17 +412,17 @@ macro_rules! impl_outer_event { $name:ident; $runtime:ident; Modules { - $module:ident, + $( #[codec(index = $index:tt)] )? $module:ident, $( $rest_event_no_generic_no_instance:tt )* }; - $( $module_name:ident::Event $( <$generic_param:ident> )? $( { $generic_instance:ident } )?, )*; + { $( $parsed:tt )* }; ) => { $crate::impl_outer_event!( $( #[$attr] )*; $name; $runtime; Modules { $( $rest_event_no_generic_no_instance )* }; - $( $module_name::Event $( <$generic_param> )? $( { $generic_instance } )?, )* $module::Event,; + { $( $parsed )* $module::Event index { $( $index )? }, }; ); }; @@ -432,7 +432,14 @@ macro_rules! impl_outer_event { $name:ident; $runtime:ident; Modules {}; - $( $module_name:ident::Event $( <$generic_param:ident> )? $( { $generic_instance:ident } )?, )*; + { + $( + $module_name:ident::Event + $( <$generic_param:ident> )? + $( { $generic_instance:ident } )? + index { $( $index:tt )? }, + )* + }; ) => { $crate::paste::item! { #[derive( @@ -445,6 +452,7 @@ macro_rules! impl_outer_event { #[allow(non_camel_case_types)] pub enum $name { $( + $( #[codec(index = $index)] )? [< $module_name $(_ $generic_instance )? >]( $module_name::Event < $( $generic_param )? $(, $module_name::$generic_instance )? > ), @@ -697,7 +705,7 @@ mod tests { pub enum TestEventSystemRenamed for TestRuntime2 { system_renamed, event_module, - event_module2, + #[codec(index = "5")] event_module2, event_module3, } } @@ -796,4 +804,22 @@ mod tests { fn outer_event_metadata() { assert_eq!(EXPECTED_METADATA, TestRuntime::outer_event_metadata()); } + + #[test] + fn test_codec() { + let runtime_1_event_module_2 = TestEvent::event_module2( + event_module2::Event::::TestEvent(3) + ); + assert_eq!(runtime_1_event_module_2.encode()[0], 2); + + let runtime_2_event_module_2 = TestEventSystemRenamed::event_module2( + event_module2::Event::::TestEvent(3) + ); + assert_eq!(runtime_2_event_module_2.encode()[0], 5); + + let runtime_2_event_module_3 = TestEventSystemRenamed::event_module3( + event_module3::Event::HiEvent + ); + assert_eq!(runtime_2_event_module_3.encode()[0], 3); + } } diff --git a/frame/support/src/metadata.rs b/frame/support/src/metadata.rs index aa7d71b52e0b6b183a797353986fd8d452b1559b..9ae1d6ce663d5d64ce259e764aeaa02b36106c47 100644 --- a/frame/support/src/metadata.rs +++ b/frame/support/src/metadata.rs @@ -51,9 +51,9 @@ pub use frame_metadata::{ /// struct Runtime; /// frame_support::impl_runtime_metadata! { /// for Runtime with modules where Extrinsic = UncheckedExtrinsic -/// module0::Module as Module0 with, -/// module1::Module as Module1 with, -/// module2::Module as Module2 with Storage, +/// module0::Module as Module0 { index 0 } with, +/// module1::Module as Module1 { index 1 } with, +/// module2::Module as Module2 { index 2 } with Storage, /// }; /// ``` /// @@ -91,13 +91,17 @@ macro_rules! 
__runtime_modules_to_metadata { ( $runtime: ident; $( $metadata:expr ),*; - $mod:ident::$module:ident $( < $instance:ident > )? as $name:ident $(with)+ $($kw:ident)*, + $mod:ident::$module:ident $( < $instance:ident > )? as $name:ident + { index $index:tt } + $(with)+ $($kw:ident)* + , $( $rest:tt )* ) => { $crate::__runtime_modules_to_metadata!( $runtime; $( $metadata, )* $crate::metadata::ModuleMetadata { name: $crate::metadata::DecodeDifferent::Encode(stringify!($name)), + index: $index, storage: $crate::__runtime_modules_to_metadata_calls_storage!( $mod, $module $( <$instance> )?, $runtime, $(with $kw)* ), @@ -297,7 +301,7 @@ mod tests { type AccountId: From + Encode; type BlockNumber: From + Encode; type SomeValue: Get; - type ModuleToIndex: crate::traits::ModuleToIndex; + type PalletInfo: crate::traits::PalletInfo; type Call; } @@ -443,15 +447,15 @@ mod tests { type AccountId = u32; type BlockNumber = u32; type SomeValue = SystemValue; - type ModuleToIndex = (); + type PalletInfo = (); type Call = Call; } impl_runtime_metadata!( for TestRuntime with modules where Extrinsic = TestExtrinsic - system::Module as System with Event, - event_module::Module as Module with Event Call, - event_module2::Module as Module2 with Event Storage Call, + system::Module as System { index 0 } with Event, + event_module::Module as Module { index 1 } with Event Call, + event_module2::Module as Module2 { index 2 } with Event Storage Call, ); struct ConstantBlockNumberByteGetter; @@ -481,6 +485,7 @@ mod tests { modules: DecodeDifferent::Encode(&[ ModuleMetadata { name: DecodeDifferent::Encode("System"), + index: 0, storage: None, calls: None, event: Some(DecodeDifferent::Encode( @@ -524,6 +529,7 @@ mod tests { }, ModuleMetadata { name: DecodeDifferent::Encode("Module"), + index: 1, storage: None, calls: Some( DecodeDifferent::Encode(FnEncode(|| &[ @@ -559,6 +565,7 @@ mod tests { }, ModuleMetadata { name: DecodeDifferent::Encode("Module2"), + index: 2, storage: Some(DecodeDifferent::Encode( FnEncode(|| StorageMetadata { prefix: DecodeDifferent::Encode("TestStorage"), diff --git a/frame/support/src/origin.rs b/frame/support/src/origin.rs index df75f8dc65645ee78c5cc71df4830ea8ae43c52a..e4052337a01418363233c03c172755402b80678c 100644 --- a/frame/support/src/origin.rs +++ b/frame/support/src/origin.rs @@ -41,7 +41,10 @@ macro_rules! impl_outer_origin { ( $(#[$attr:meta])* - pub enum $name:ident for $runtime:ident where system = $system:ident { + pub enum $name:ident for $runtime:ident where + system = $system:ident + $(, system_index = $system_index:tt)? + { $( $rest_with_system:tt )* } ) => { @@ -52,6 +55,7 @@ macro_rules! impl_outer_origin { [< $name Caller >]; $runtime; $system; + system_index { $( $system_index )? }; Modules { $( $rest_with_system )* }; ); } @@ -64,8 +68,9 @@ macro_rules! impl_outer_origin { $caller_name:ident; $runtime:ident; $system:ident; + system_index { $( $system_index:tt )? }; Modules { - $module:ident $instance:ident + $( #[codec(index = $index:tt)] )? $module:ident $instance:ident $(, $( $rest_module:tt )* )? }; $( $parsed:tt )* @@ -76,8 +81,9 @@ macro_rules! impl_outer_origin { $caller_name; $runtime; $system; + system_index { $( $system_index )? }; Modules { $( $( $rest_module )* )? }; - $( $parsed )* $module <$runtime> { $instance }, + $( $parsed )* $module <$runtime> { $instance } index { $( $index )? }, ); }; @@ -88,8 +94,9 @@ macro_rules! impl_outer_origin { $caller_name:ident; $runtime:ident; $system:ident; + system_index { $( $system_index:tt )? 
}; Modules { - $module:ident $instance:ident + $( #[codec(index = $index:tt )] )? $module:ident $instance:ident $(, $rest_module:tt )* }; $( $parsed:tt )* @@ -100,8 +107,9 @@ macro_rules! impl_outer_origin { $caller_name; $runtime; $system; + system_index { $( $system_index )? }; Modules { $( $rest_module )* }; - $( $parsed )* $module { $instance }, + $( $parsed )* $module { $instance } index { $( $index )? }, ); }; @@ -112,8 +120,9 @@ macro_rules! impl_outer_origin { $caller_name:ident; $runtime:ident; $system:ident; + system_index { $( $system_index:tt )? }; Modules { - $module:ident + $( #[codec(index = $index:tt )] )? $module:ident $(, $( $rest_module:tt )* )? }; $( $parsed:tt )* @@ -124,8 +133,9 @@ macro_rules! impl_outer_origin { $caller_name; $runtime; $system; + system_index { $( $system_index )? }; Modules { $( $( $rest_module )* )? }; - $( $parsed )* $module <$runtime>, + $( $parsed )* $module <$runtime> index { $( $index )? }, ); }; @@ -136,8 +146,9 @@ macro_rules! impl_outer_origin { $caller_name:ident; $runtime:ident; $system:ident; + system_index { $( $system_index:tt )? }; Modules { - $module:ident + $( #[codec(index = $index:tt )] )? $module:ident $(, $( $rest_module:tt )* )? }; $( $parsed:tt )* @@ -148,8 +159,9 @@ macro_rules! impl_outer_origin { $caller_name; $runtime; $system; + system_index { $( $system_index )? }; Modules { $( $( $rest_module )* )? }; - $( $parsed )* $module, + $( $parsed )* $module index { $( $index )? }, ); }; @@ -160,8 +172,14 @@ macro_rules! impl_outer_origin { $caller_name:ident; $runtime:ident; $system:ident; + system_index { $( $system_index:tt )? }; Modules { }; - $( $module:ident $( < $generic:ident > )? $( { $generic_instance:ident } )? ,)* + $( + $module:ident + $( < $generic:ident > )? + $( { $generic_instance:ident } )? + index { $( $index:tt )? }, + )* ) => { // WARNING: All instance must hold the filter `frame_system::Trait::BaseCallFilter`, except // when caller is system Root. One can use `OriginTrait::reset_filter` to do so. @@ -233,8 +251,10 @@ macro_rules! impl_outer_origin { $(#[$attr])* #[allow(non_camel_case_types)] pub enum $caller_name { + $( #[codec(index = $system_index)] )? system($system::Origin<$runtime>), $( + $( #[codec(index = $index)] )? [< $module $( _ $generic_instance )? >] ($module::Origin < $( $generic, )? $( $module::$generic_instance )? 
> ), )* @@ -442,6 +462,13 @@ mod tests { pub enum OriginEmpty for TestRuntime where system = frame_system {} ); + impl_outer_origin!( + pub enum OriginIndices for TestRuntime where system = frame_system, system_index = "11" { + origin_with_generic, + #[codec(index = "10")] origin_without_generic, + } + ); + #[test] fn test_default_filter() { assert_eq!(OriginWithSystem::root().filter_call(&0), true); @@ -472,4 +499,20 @@ mod tests { assert_eq!(origin.filter_call(&0), true); assert_eq!(origin.filter_call(&1), false); } + + #[test] + fn test_codec() { + use codec::Encode; + assert_eq!(OriginIndices::root().caller.encode()[0], 11); + let without_generic_variant = OriginIndicesCaller::origin_without_generic( + origin_without_generic::Origin + ); + assert_eq!(without_generic_variant.encode()[0], 10); + + assert_eq!(OriginWithoutSystem::root().caller.encode()[0], 0); + let without_generic_variant = OriginWithoutSystemCaller::origin_without_generic( + origin_without_generic::Origin + ); + assert_eq!(without_generic_variant.encode()[0], 1); + } } diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index 3c82f4156a27182ff4f5edd382845aed3471ab90..9454ab401da281db4a56f518880d940deb235a41 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -18,7 +18,7 @@ use sp_std::prelude::*; use sp_std::borrow::Borrow; use codec::{FullCodec, FullEncode, Decode, Encode, EncodeLike}; -use crate::{storage::{self, unhashed, StorageAppend}, Never}; +use crate::{storage::{self, unhashed, StorageAppend, PrefixIterator}, Never}; use crate::hash::{StorageHasher, Twox128, ReversibleStorageHasher}; /// Generator for `StorageDoubleMap` used by `decl_storage`. @@ -213,10 +213,11 @@ impl storage::StorageDoubleMap for G where KArg1: ?Sized + EncodeLike { let prefix = Self::storage_double_map_final_key1(k1); - storage::PrefixIterator:: { + storage::PrefixIterator { prefix: prefix.clone(), previous_key: prefix, - phantom_data: Default::default(), + drain: false, + closure: |_raw_key, mut raw_value| V::decode(&mut raw_value), } } @@ -322,54 +323,6 @@ impl storage::StorageDoubleMap for G where } } -/// Iterate over a prefix and decode raw_key and raw_value into `T`. -pub struct MapIterator { - prefix: Vec, - previous_key: Vec, - /// If true then value are removed while iterating - drain: bool, - /// Function that take `(raw_key_without_prefix, raw_value)` and decode `T`. - /// `raw_key_without_prefix` is the raw storage key without the prefix iterated on. 
- closure: fn(&[u8], &[u8]) -> Result, -} - -impl Iterator for MapIterator { - type Item = T; - - fn next(&mut self) -> Option { - loop { - let maybe_next = sp_io::storage::next_key(&self.previous_key) - .filter(|n| n.starts_with(&self.prefix)); - break match maybe_next { - Some(next) => { - self.previous_key = next; - let raw_value = match unhashed::get_raw(&self.previous_key) { - Some(raw_value) => raw_value, - None => { - frame_support::print("ERROR: next_key returned a key with no value in MapIterator"); - continue - } - }; - if self.drain { - unhashed::kill(&self.previous_key) - } - let raw_key_without_prefix = &self.previous_key[self.prefix.len()..]; - let item = match (self.closure)(raw_key_without_prefix, &raw_value[..]) { - Ok(item) => item, - Err(_e) => { - frame_support::print("ERROR: (key, value) failed to decode in MapIterator"); - continue - } - }; - - Some(item) - } - None => None, - } - } - } -} - impl< K1: FullCodec, K2: FullCodec, @@ -379,8 +332,8 @@ impl< G::Hasher1: ReversibleStorageHasher, G::Hasher2: ReversibleStorageHasher { - type PrefixIterator = MapIterator<(K2, V)>; - type Iterator = MapIterator<(K1, K2, V)>; + type PrefixIterator = PrefixIterator<(K2, V)>; + type Iterator = PrefixIterator<(K1, K2, V)>; fn iter_prefix(k1: impl EncodeLike) -> Self::PrefixIterator { let prefix = G::storage_double_map_final_key1(k1); @@ -423,23 +376,41 @@ impl< iterator } - fn translate Option>(f: F) { + fn translate Option>(f: F) { let prefix = G::prefix_hash(); let mut previous_key = prefix.clone(); - loop { - match sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix)) { - Some(next) => { - previous_key = next; - let maybe_value = unhashed::get::(&previous_key); - match maybe_value { - Some(value) => match f(value) { - Some(new) => unhashed::put::(&previous_key, &new), - None => unhashed::kill(&previous_key), - }, - None => continue, - } - } - None => return, + while let Some(next) = sp_io::storage::next_key(&previous_key) + .filter(|n| n.starts_with(&prefix)) + { + previous_key = next; + let value = match unhashed::get::(&previous_key) { + Some(value) => value, + None => { + crate::debug::error!("Invalid translate: fail to decode old value"); + continue + }, + }; + let mut key_material = G::Hasher1::reverse(&previous_key[prefix.len()..]); + let key1 = match K1::decode(&mut key_material) { + Ok(key1) => key1, + Err(_) => { + crate::debug::error!("Invalid translate: fail to decode key1"); + continue + }, + }; + + let mut key2_material = G::Hasher2::reverse(&key_material); + let key2 = match K2::decode(&mut key2_material) { + Ok(key2) => key2, + Err(_) => { + crate::debug::error!("Invalid translate: fail to decode key2"); + continue + }, + }; + + match f(key1, key2, value) { + Some(new) => unhashed::put::(&previous_key, &new), + None => unhashed::kill(&previous_key), } } } @@ -447,10 +418,12 @@ impl< /// Test iterators for StorageDoubleMap #[cfg(test)] -#[allow(dead_code)] mod test_iterators { use codec::{Encode, Decode}; - use crate::storage::{generator::StorageDoubleMap, IterableStorageDoubleMap, unhashed}; + use crate::{ + hash::StorageHasher, + storage::{generator::StorageDoubleMap, IterableStorageDoubleMap, unhashed}, + }; pub trait Trait { type Origin; @@ -466,7 +439,7 @@ mod test_iterators { crate::decl_storage! 
{ trait Store for Module as Test { - DoubleMap: double_map hasher(blake2_128_concat) u16, hasher(blake2_128_concat) u32 => u64; + DoubleMap: double_map hasher(blake2_128_concat) u16, hasher(twox_64_concat) u32 => u64; } } @@ -484,11 +457,6 @@ mod test_iterators { prefix } - fn key_in_prefix(mut prefix: Vec) -> Vec { - prefix.push(0); - prefix - } - #[test] fn double_map_reversible_reversible_iteration() { sp_io::TestExternalities::default().execute_with(|| { @@ -534,22 +502,59 @@ mod test_iterators { assert_eq!( DoubleMap::iter_prefix(k1).collect::>(), - vec![(0, 0), (2, 2), (1, 1), (3, 3)], + vec![(1, 1), (2, 2), (0, 0), (3, 3)], ); assert_eq!( DoubleMap::iter_prefix_values(k1).collect::>(), - vec![0, 2, 1, 3], + vec![1, 2, 0, 3], ); assert_eq!( DoubleMap::drain_prefix(k1).collect::>(), - vec![(0, 0), (2, 2), (1, 1), (3, 3)], + vec![(1, 1), (2, 2), (0, 0), (3, 3)], ); assert_eq!(DoubleMap::iter_prefix(k1).collect::>(), vec![]); assert_eq!(unhashed::get(&key_before_prefix(prefix.clone())), Some(1u64)); assert_eq!(unhashed::get(&key_after_prefix(prefix.clone())), Some(1u64)); + + // Translate + let prefix = DoubleMap::prefix_hash(); + + unhashed::put(&key_before_prefix(prefix.clone()), &1u64); + unhashed::put(&key_after_prefix(prefix.clone()), &1u64); + for i in 0..4 { + DoubleMap::insert(i as u16, i as u32, i as u64); + } + + // Wrong key1 + unhashed::put( + &[prefix.clone(), vec![1, 2, 3]].concat(), + &3u64.encode() + ); + + // Wrong key2 + unhashed::put( + &[prefix.clone(), crate::Blake2_128Concat::hash(&1u16.encode())].concat(), + &3u64.encode() + ); + + // Wrong value + unhashed::put( + &[ + prefix.clone(), + crate::Blake2_128Concat::hash(&1u16.encode()), + crate::Twox64Concat::hash(&2u32.encode()), + ].concat(), + &vec![1], + ); + + DoubleMap::translate(|_k1, _k2, v: u64| Some(v*2)); + assert_eq!( + DoubleMap::iter().collect::>(), + vec![(3, 3, 6), (0, 0, 0), (2, 2, 4), (1, 1, 2)], + ); }) } } diff --git a/frame/support/src/storage/generator/map.rs b/frame/support/src/storage/generator/map.rs index fe932b797940b8ac8e8d08b74d1f96d984a7e0a6..1c13de52e164040976286bcbcef9125e9f8e1349 100644 --- a/frame/support/src/storage/generator/map.rs +++ b/frame/support/src/storage/generator/map.rs @@ -20,7 +20,7 @@ use sp_std::prelude::*; use sp_std::borrow::Borrow; use codec::{FullCodec, FullEncode, Decode, Encode, EncodeLike}; use crate::{ - storage::{self, unhashed, StorageAppend}, + storage::{self, unhashed, StorageAppend, PrefixIterator}, Never, hash::{StorageHasher, Twox128, ReversibleStorageHasher}, }; @@ -139,53 +139,56 @@ impl< > storage::IterableStorageMap for G where G::Hasher: ReversibleStorageHasher { - type Iterator = StorageMapIterator; + type Iterator = PrefixIterator<(K, V)>; /// Enumerate all elements in the map. fn iter() -> Self::Iterator { let prefix = G::prefix_hash(); - Self::Iterator { + PrefixIterator { prefix: prefix.clone(), previous_key: prefix, drain: false, - _phantom: Default::default(), + closure: |raw_key_without_prefix, mut raw_value| { + let mut key_material = G::Hasher::reverse(raw_key_without_prefix); + Ok((K::decode(&mut key_material)?, V::decode(&mut raw_value)?)) + }, } } /// Enumerate all elements in the map. 
fn drain() -> Self::Iterator { - let prefix = G::prefix_hash(); - Self::Iterator { - prefix: prefix.clone(), - previous_key: prefix, - drain: true, - _phantom: Default::default(), - } + let mut iterator = Self::iter(); + iterator.drain = true; + iterator } fn translate Option>(f: F) { let prefix = G::prefix_hash(); let mut previous_key = prefix.clone(); - loop { - match sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix)) { - Some(next) => { - previous_key = next; - let maybe_value = unhashed::get::(&previous_key); - match maybe_value { - Some(value) => { - let mut key_material = G::Hasher::reverse(&previous_key[prefix.len()..]); - match K::decode(&mut key_material) { - Ok(key) => match f(key, value) { - Some(new) => unhashed::put::(&previous_key, &new), - None => unhashed::kill(&previous_key), - }, - Err(_) => continue, - } - } - None => continue, - } - } - None => return, + while let Some(next) = sp_io::storage::next_key(&previous_key) + .filter(|n| n.starts_with(&prefix)) + { + previous_key = next; + let value = match unhashed::get::(&previous_key) { + Some(value) => value, + None => { + crate::debug::error!("Invalid translate: fail to decode old value"); + continue + }, + }; + + let mut key_material = G::Hasher::reverse(&previous_key[prefix.len()..]); + let key = match K::decode(&mut key_material) { + Ok(key) => key, + Err(_) => { + crate::debug::error!("Invalid translate: fail to decode key"); + continue + }, + }; + + match f(key, value) { + Some(new) => unhashed::put::(&previous_key, &new), + None => unhashed::kill(&previous_key), } } } @@ -312,3 +315,91 @@ impl> storage::StorageMap }) } } + +/// Test iterators for StorageMap +#[cfg(test)] +mod test_iterators { + use codec::{Encode, Decode}; + use crate::{ + hash::StorageHasher, + storage::{generator::StorageMap, IterableStorageMap, unhashed}, + }; + + pub trait Trait { + type Origin; + type BlockNumber; + } + + crate::decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + #[derive(PartialEq, Eq, Clone, Encode, Decode)] + struct NoDef(u32); + + crate::decl_storage! 
{ + trait Store for Module as Test { + Map: map hasher(blake2_128_concat) u16 => u64; + } + } + + fn key_before_prefix(mut prefix: Vec) -> Vec { + let last = prefix.iter_mut().last().unwrap(); + assert!(*last != 0, "mock function not implemented for this prefix"); + *last -= 1; + prefix + } + + fn key_after_prefix(mut prefix: Vec) -> Vec { + let last = prefix.iter_mut().last().unwrap(); + assert!(*last != 255, "mock function not implemented for this prefix"); + *last += 1; + prefix + } + + #[test] + fn map_reversible_reversible_iteration() { + sp_io::TestExternalities::default().execute_with(|| { + // All map iterator + let prefix = Map::prefix_hash(); + + unhashed::put(&key_before_prefix(prefix.clone()), &1u64); + unhashed::put(&key_after_prefix(prefix.clone()), &1u64); + + for i in 0..4 { + Map::insert(i as u16, i as u64); + } + + assert_eq!(Map::iter().collect::>(), vec![(3, 3), (0, 0), (2, 2), (1, 1)]); + + assert_eq!(Map::iter_values().collect::>(), vec![3, 0, 2, 1]); + + assert_eq!(Map::drain().collect::>(), vec![(3, 3), (0, 0), (2, 2), (1, 1)]); + + assert_eq!(Map::iter().collect::>(), vec![]); + assert_eq!(unhashed::get(&key_before_prefix(prefix.clone())), Some(1u64)); + assert_eq!(unhashed::get(&key_after_prefix(prefix.clone())), Some(1u64)); + + // Translate + let prefix = Map::prefix_hash(); + + unhashed::put(&key_before_prefix(prefix.clone()), &1u64); + unhashed::put(&key_after_prefix(prefix.clone()), &1u64); + for i in 0..4 { + Map::insert(i as u16, i as u64); + } + + // Wrong key + unhashed::put(&[prefix.clone(), vec![1, 2, 3]].concat(), &3u64.encode()); + + // Wrong value + unhashed::put( + &[prefix.clone(), crate::Blake2_128Concat::hash(&6u16.encode())].concat(), + &vec![1], + ); + + Map::translate(|_k1, v: u64| Some(v*2)); + assert_eq!(Map::iter().collect::>(), vec![(3, 6), (0, 0), (2, 4), (1, 2)]); + }) + } +} diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 347fd814136d77c682a47a5286fef367a0d4ffdb..5ee144c79c4db13fcee2c8336779696bea6e88da 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -17,7 +17,7 @@ //! Stuff to do with the runtime's storage. -use sp_std::{prelude::*, marker::PhantomData}; +use sp_std::prelude::*; use codec::{FullCodec, FullEncode, Encode, EncodeLike, Decode}; use crate::hash::{Twox128, StorageHasher}; use sp_runtime::generic::{Digest, DigestItem}; @@ -251,6 +251,8 @@ pub trait IterableStorageMap: StorageMap { /// Translate the values of all elements by a function `f`, in the map in no particular order. /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fails to decode because storage is corrupted then it is skipped. fn translate Option>(f: F); } @@ -286,7 +288,9 @@ pub trait IterableStorageDoubleMap< /// Translate the values of all elements by a function `f`, in the map in no particular order. /// By returning `None` from `f` for an element, you'll remove it from the map. - fn translate Option>(f: F); + /// + /// NOTE: If a value fails to decode because storage is corrupted then it is skipped. + fn translate Option>(f: F); } /// An implementation of a map with a two keys. @@ -433,35 +437,58 @@ pub trait StorageDoubleMap { >(key1: KeyArg1, key2: KeyArg2) -> Option; } -/// Iterator for prefixed map. -pub struct PrefixIterator { +/// Iterate over a prefix and decode raw_key and raw_value into `T`. +/// +/// If an entry fails to decode it is skipped and the iteration continues with the next key. 
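+///
+/// # Example
+///
+/// A sketch of typical use, assuming a hypothetical map `MyMap: map hasher(blake2_128_concat) u32 => u64`
+/// declared with `decl_storage!` (the iterator is normally obtained through `iter`/`iter_values`
+/// rather than constructed by hand):
+/// ```nocompile
+/// // `MyMap` is a placeholder name; entries that fail to decode are skipped.
+/// for (key, value) in MyMap::iter() {
+///     // ...
+/// }
+/// ```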
+pub struct PrefixIterator { prefix: Vec, previous_key: Vec, - phantom_data: PhantomData, + /// If true then values are removed while iterating + drain: bool, + /// Function that takes `(raw_key_without_prefix, raw_value)` and decodes `T`. + /// `raw_key_without_prefix` is the raw storage key without the prefix iterated on. + closure: fn(&[u8], &[u8]) -> Result, } -impl Iterator for PrefixIterator { - type Item = Value; +impl Iterator for PrefixIterator { + type Item = T; fn next(&mut self) -> Option { - match sp_io::storage::next_key(&self.previous_key) - .filter(|n| n.starts_with(&self.prefix[..])) - { - Some(next_key) => { - let value = unhashed::get(&next_key); - - if value.is_none() { - runtime_print!( - "ERROR: returned next_key has no value:\nkey is {:?}\nnext_key is {:?}", - &self.previous_key, &next_key, - ); + loop { + let maybe_next = sp_io::storage::next_key(&self.previous_key) + .filter(|n| n.starts_with(&self.prefix)); + break match maybe_next { + Some(next) => { + self.previous_key = next; + let raw_value = match unhashed::get_raw(&self.previous_key) { + Some(raw_value) => raw_value, + None => { + crate::debug::error!( + "next_key returned a key with no value at {:?}", + self.previous_key + ); + continue + } + }; + if self.drain { + unhashed::kill(&self.previous_key) + } + let raw_key_without_prefix = &self.previous_key[self.prefix.len()..]; + let item = match (self.closure)(raw_key_without_prefix, &raw_value[..]) { + Ok(item) => item, + Err(e) => { + crate::debug::error!( + "(key, value) failed to decode at {:?}: {:?}", + self.previous_key, e + ); + continue + } + }; + + Some(item) } - - self.previous_key = next_key; - - value - }, - _ => None, + None => None, + } } } } @@ -493,22 +520,22 @@ pub trait StoragePrefixedMap { } /// Iter over all value of the storage. + /// + /// NOTE: If a value fails to decode because storage is corrupted then it is skipped. fn iter_values() -> PrefixIterator { let prefix = Self::final_prefix(); PrefixIterator { prefix: prefix.to_vec(), previous_key: prefix.to_vec(), - phantom_data: Default::default(), + drain: false, + closure: |_raw_key, mut raw_value| Value::decode(&mut raw_value), } } - /// Translate the values from some previous `OldValue` to the current type. - /// - /// `TV` translates values. + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// By returning `None` from `f` for an element, you'll remove it from the map. /// - /// Returns `Err` if the map could not be interpreted as the old type, and Ok if it could. - /// The `Err` contains the number of value that couldn't be interpreted, those value are - /// removed from the map. + /// NOTE: If a value fails to decode because storage is corrupted then it is skipped. /// /// # Warning /// @@ -517,33 +544,28 @@ pub trait StoragePrefixedMap { /// /// # Usage /// - /// This would typically be called inside the module implementation of on_runtime_upgrade, while - /// ensuring **no usage of this storage are made before the call to `on_runtime_upgrade`**. (More - /// precisely prior initialized modules doesn't make use of this storage). - fn translate_values(translate_val: TV) -> Result<(), u32> - where OldValue: Decode, TV: Fn(OldValue) -> Value - { + /// This would typically be called inside the module implementation of on_runtime_upgrade. 
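+ ///
+ /// A minimal sketch, assuming a hypothetical prefixed map `MyMap` whose values are being
+ /// migrated from `u32` to `u64` (`MyMap` is a placeholder name):
+ /// ```nocompile
+ /// // typically called from `on_runtime_upgrade`; returning `None` would remove the entry
+ /// MyMap::translate_values(|old: u32| Some(old as u64));
+ /// ```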
+ fn translate_values Option>(f: F) { let prefix = Self::final_prefix(); - let mut previous_key = prefix.to_vec(); - let mut errors = 0; - while let Some(next_key) = sp_io::storage::next_key(&previous_key) - .filter(|n| n.starts_with(&prefix[..])) + let mut previous_key = prefix.clone().to_vec(); + while let Some(next) = sp_io::storage::next_key(&previous_key) + .filter(|n| n.starts_with(&prefix)) { - if let Some(value) = unhashed::get(&next_key) { - unhashed::put(&next_key[..], &translate_val(value)); - } else { - // We failed to read the value. Remove the key and increment errors. - unhashed::kill(&next_key[..]); - errors += 1; + previous_key = next; + let maybe_value = unhashed::get::(&previous_key); + match maybe_value { + Some(value) => match f(value) { + Some(new) => unhashed::put::(&previous_key, &new), + None => unhashed::kill(&previous_key), + }, + None => { + crate::debug::error!( + "old key failed to decode at {:?}", + previous_key + ); + continue + }, } - - previous_key = next_key; - } - - if errors == 0 { - Ok(()) - } else { - Err(errors) } } } @@ -634,7 +656,7 @@ mod test { assert_eq!(MyStorage::final_prefix().to_vec(), k); // test iteration - assert_eq!(MyStorage::iter_values().collect::>(), vec![]); + assert!(MyStorage::iter_values().collect::>().is_empty()); unhashed::put(&[&k[..], &vec![1][..]].concat(), &1u64); unhashed::put(&[&k[..], &vec![1, 1][..]].concat(), &2u64); @@ -645,14 +667,14 @@ mod test { // test removal MyStorage::remove_all(); - assert_eq!(MyStorage::iter_values().collect::>(), vec![]); + assert!(MyStorage::iter_values().collect::>().is_empty()); // test migration unhashed::put(&[&k[..], &vec![1][..]].concat(), &1u32); unhashed::put(&[&k[..], &vec![8][..]].concat(), &2u32); - assert_eq!(MyStorage::iter_values().collect::>(), vec![]); - MyStorage::translate_values(|v: u32| v as u64).unwrap(); + assert!(MyStorage::iter_values().collect::>().is_empty()); + MyStorage::translate_values(|v: u32| Some(v as u64)); assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2]); MyStorage::remove_all(); @@ -664,8 +686,8 @@ mod test { // (contains some value that successfully decoded to u64) assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3]); - assert_eq!(MyStorage::translate_values(|v: u128| v as u64), Err(2)); - assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 3]); + MyStorage::translate_values(|v: u128| Some(v as u64)); + assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3]); MyStorage::remove_all(); // test that other values are not modified. diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 9750c11c8a326e13c2f3573f060dbe84244806d1..966941412872151f990f32343478b089283b89b4 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -1110,6 +1110,9 @@ pub trait LockableCurrency: Currency { /// The quantity used to denote time; usually just a `BlockNumber`. type Moment; + /// The maximum number of locks a user should have on their account. + type MaxLocks: Get; + /// Create a new balance lock on account `who`. /// /// If the new lock is valid (i.e. not already expired), it will push the struct to @@ -1310,8 +1313,6 @@ impl ChangeMembers for () { fn set_prime(_: Option) {} } - - /// Trait for type that can handle the initialization of account IDs at genesis. pub trait InitializeMembers { /// Initialize the members to the given `members`. 
@@ -1345,7 +1346,10 @@ pub trait Randomness { } } -impl Randomness for () { +/// Provides an implementation of [`Randomness`] that should only be used in tests! +pub struct TestRandomness; + +impl Randomness for TestRandomness { fn random(subject: &[u8]) -> Output { Output::decode(&mut TrailingZeroInput::new(subject)).unwrap_or_default() } @@ -1374,16 +1378,20 @@ pub trait ValidatorRegistration { fn is_registered(id: &ValidatorId) -> bool; } -/// Something that can convert a given module into the index of the module in the runtime. +/// Provides information about the pallet setup in the runtime. /// -/// The index of a module is determined by the position it appears in `construct_runtime!`. -pub trait ModuleToIndex { - /// Convert the given module `M` into an index. - fn module_to_index() -> Option; -} - -impl ModuleToIndex for () { - fn module_to_index() -> Option { Some(0) } +/// An implementor should be able to provide information about each pallet that +/// is configured in `construct_runtime!`. +pub trait PalletInfo { + /// Convert the given pallet `P` into its index as configured in the runtime. + fn index() -> Option; + /// Convert the given pallet `P` into its name as configured in the runtime. + fn name() -> Option<&'static str>; +} + +impl PalletInfo for () { + fn index() -> Option { Some(0) } + fn name() -> Option<&'static str> { Some("test") } } /// The function and pallet name of the Call. diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index db1e25ca7ab2e08112c5defa8be035a34e5e8b95..1d19eeef70d79474a0fd23f1e050b6573fafe8bc 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -242,6 +242,30 @@ impl Default for DispatchClass { } } +/// Primitives related to priority management of Frame. +pub mod priority { + /// The starting point of all Operational transactions. 3/4 of u64::max_value(). + pub const LIMIT: u64 = 13_835_058_055_282_163_711_u64; + + /// Wrapper for priority of different dispatch classes. + /// + /// This only makes sure that any value created for the operational dispatch class is + /// incremented by [`LIMIT`]. + pub enum FrameTransactionPriority { + Normal(u64), + Operational(u64), + } + + impl From for u64 { + fn from(priority: FrameTransactionPriority) -> Self { + match priority { + FrameTransactionPriority::Normal(inner) => inner, + FrameTransactionPriority::Operational(inner) => inner.saturating_add(LIMIT), + } + } + } +} + /// A bundle of static information collected from the `#[weight = $x]` attributes. 
#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] pub struct DispatchInfo { diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index 22d420a61751a3ee920f33707ec697727d7fdbfe..ee8ace5c983c5034fb79030ac2e99b8155e1d9fb 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-test" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,16 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-io = { version = "2.0.0-rc5", path = "../../../primitives/io", default-features = false } -sp-state-machine = { version = "0.8.0-rc5", optional = true, path = "../../../primitives/state-machine" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../" } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/inherents" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } -trybuild = "1.0.17" +sp-io = { version = "2.0.0", path = "../../../primitives/io", default-features = false } +sp-state-machine = { version = "0.8.0", optional = true, path = "../../../primitives/state-machine" } +frame-support = { version = "2.0.0", default-features = false, path = "../" } +sp-inherents = { version = "2.0.0", default-features = false, path = "../../../primitives/inherents" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } +trybuild = "1.0.33" pretty_assertions = "0.6.1" rustversion = "1.0.0" +frame-metadata = { version = "12.0.0", default-features = false, path = "../../metadata" } [features] default = ["std"] diff --git a/frame/support/test/src/lib.rs b/frame/support/test/src/lib.rs index c0baf448eed850511b2cdc33e307d6758739da8f..d5f49299880ca9ac213f2e388c7059387a9cc024 100644 --- a/frame/support/test/src/lib.rs +++ b/frame/support/test/src/lib.rs @@ -32,5 +32,5 @@ pub trait Trait { frame_support::decl_module! { /// Some test module - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 10fc3319fb080fb34d70fd0dcddc3b991c4290ff..9a17e44dbef4c7a797201ee7e495b50950e62de0 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -24,13 +24,14 @@ use sp_runtime::{generic, traits::{BlakeTwo256, Block as _, Verify}, DispatchError}; use sp_core::{H256, sr25519}; use sp_std::cell::RefCell; +use frame_support::traits::PalletInfo as _; mod system; pub trait Currency {} thread_local! 
{ - pub static INTEGRITY_TEST_EXEC: RefCell = RefCell::new(0); + pub static INTEGRITY_TEST_EXEC: RefCell = RefCell::new(0); } mod module1 { @@ -40,7 +41,7 @@ mod module1 { frame_support::decl_module! { pub struct Module, I: Instance = DefaultInstance> for enum Call - where origin: ::Origin + where origin: ::Origin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -49,6 +50,17 @@ mod module1 { } } + #[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode)] + pub struct Origin(pub core::marker::PhantomData::<(T, I)>); + + frame_support::decl_event! { + pub enum Event where + ::AccountId + { + A(AccountId), + } + } + frame_support::decl_error! { pub enum Error for Module, I: Instance> { Something @@ -67,7 +79,7 @@ mod module2 { frame_support::decl_module! { pub struct Module for enum Call - where origin: ::Origin + where origin: ::Origin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -80,6 +92,15 @@ mod module2 { } } + #[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode)] + pub struct Origin; + + frame_support::decl_event! { + pub enum Event { + A, + } + } + frame_support::decl_error! { pub enum Error for Module { Something @@ -91,8 +112,7 @@ mod module2 { } } -impl module1::Trait for Runtime {} -impl module1::Trait for Runtime {} +impl module1::Trait for Runtime {} impl module2::Trait for Runtime {} pub type Signature = sr25519::Signature; @@ -107,7 +127,7 @@ impl system::Trait for Runtime { type BlockNumber = BlockNumber; type AccountId = AccountId; type Event = Event; - type ModuleToIndex = ModuleToIndex; + type PalletInfo = PalletInfo; type Call = Call; } @@ -117,10 +137,17 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Event}, - Module1_1: module1::::{Module, Call, Storage}, - Module2: module2::{Module, Call, Storage}, - Module1_2: module1::::{Module, Call, Storage}, + System: system::{Module, Call, Event, Origin} = 30, + Module1_1: module1::::{Module, Call, Storage, Event, Origin}, + Module2: module2::{Module, Call, Storage, Event, Origin}, + Module1_2: module1::::{Module, Call, Storage, Event, Origin}, + Module1_3: module1::::{Module, Storage} = 6, + Module1_4: module1::::{Module, Call} = 3, + Module1_5: module1::::{Module, Event}, + Module1_6: module1::::{Module, Call, Storage, Event, Origin} = 1, + Module1_7: module1::::{Module, Call, Storage, Event, Origin}, + Module1_8: module1::::{Module, Call, Storage, Event, Origin} = 12, + Module1_9: module1::::{Module, Call, Storage, Event, Origin}, } ); @@ -129,27 +156,47 @@ pub type Block = generic::Block; pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; #[test] -fn check_module1_1_error_type() { +fn check_modules_error_type() { assert_eq!( Module1_1::fail(system::Origin::::Root.into()), - Err(DispatchError::Module { index: 1, error: 0, message: Some("Something") }), + Err(DispatchError::Module { index: 31, error: 0, message: Some("Something") }), + ); + assert_eq!( + Module2::fail(system::Origin::::Root.into()), + Err(DispatchError::Module { index: 32, error: 0, message: Some("Something") }), ); -} - -#[test] -fn check_module1_2_error_type() { assert_eq!( Module1_2::fail(system::Origin::::Root.into()), + Err(DispatchError::Module { index: 33, error: 0, message: Some("Something") }), + ); + assert_eq!( + Module1_3::fail(system::Origin::::Root.into()), + Err(DispatchError::Module { index: 6, error: 0, message: 
Some("Something") }), + ); + assert_eq!( + Module1_4::fail(system::Origin::::Root.into()), Err(DispatchError::Module { index: 3, error: 0, message: Some("Something") }), ); -} - -#[test] -fn check_module2_error_type() { assert_eq!( - Module2::fail(system::Origin::::Root.into()), + Module1_5::fail(system::Origin::::Root.into()), + Err(DispatchError::Module { index: 4, error: 0, message: Some("Something") }), + ); + assert_eq!( + Module1_6::fail(system::Origin::::Root.into()), + Err(DispatchError::Module { index: 1, error: 0, message: Some("Something") }), + ); + assert_eq!( + Module1_7::fail(system::Origin::::Root.into()), Err(DispatchError::Module { index: 2, error: 0, message: Some("Something") }), ); + assert_eq!( + Module1_8::fail(system::Origin::::Root.into()), + Err(DispatchError::Module { index: 12, error: 0, message: Some("Something") }), + ); + assert_eq!( + Module1_9::fail(system::Origin::::Root.into()), + Err(DispatchError::Module { index: 13, error: 0, message: Some("Something") }), + ); } #[test] @@ -157,3 +204,341 @@ fn integrity_test_works() { __construct_runtime_integrity_test::runtime_integrity_tests(); assert_eq!(INTEGRITY_TEST_EXEC.with(|i| *i.borrow()), 1); } + +#[test] +fn origin_codec() { + use codec::Encode; + + let origin = OriginCaller::system(system::RawOrigin::None); + assert_eq!(origin.encode()[0], 30); + + let origin = OriginCaller::module1_Instance1(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 31); + + let origin = OriginCaller::module2(module2::Origin); + assert_eq!(origin.encode()[0], 32); + + let origin = OriginCaller::module1_Instance2(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 33); + + let origin = OriginCaller::module1_Instance6(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 1); + + let origin = OriginCaller::module1_Instance7(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 2); + + let origin = OriginCaller::module1_Instance8(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 12); + + let origin = OriginCaller::module1_Instance9(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 13); +} + +#[test] +fn event_codec() { + use codec::Encode; + + let event = system::Event::::ExtrinsicSuccess; + assert_eq!(Event::from(event).encode()[0], 30); + + let event = module1::Event::::A(Default::default()); + assert_eq!(Event::from(event).encode()[0], 31); + + let event = module2::Event::A; + assert_eq!(Event::from(event).encode()[0], 32); + + let event = module1::Event::::A(Default::default()); + assert_eq!(Event::from(event).encode()[0], 33); + + let event = module1::Event::::A(Default::default()); + assert_eq!(Event::from(event).encode()[0], 4); + + let event = module1::Event::::A(Default::default()); + assert_eq!(Event::from(event).encode()[0], 1); + + let event = module1::Event::::A(Default::default()); + assert_eq!(Event::from(event).encode()[0], 2); + + let event = module1::Event::::A(Default::default()); + assert_eq!(Event::from(event).encode()[0], 12); + + let event = module1::Event::::A(Default::default()); + assert_eq!(Event::from(event).encode()[0], 13); +} + +#[test] +fn call_codec() { + use codec::Encode; + assert_eq!(Call::System(system::Call::noop()).encode()[0], 30); + assert_eq!(Call::Module1_1(module1::Call::fail()).encode()[0], 31); + assert_eq!(Call::Module2(module2::Call::fail()).encode()[0], 32); + assert_eq!(Call::Module1_2(module1::Call::fail()).encode()[0], 33); + 
assert_eq!(Call::Module1_4(module1::Call::fail()).encode()[0], 3); + assert_eq!(Call::Module1_6(module1::Call::fail()).encode()[0], 1); + assert_eq!(Call::Module1_7(module1::Call::fail()).encode()[0], 2); + assert_eq!(Call::Module1_8(module1::Call::fail()).encode()[0], 12); + assert_eq!(Call::Module1_9(module1::Call::fail()).encode()[0], 13); +} + +#[test] +fn test_metadata() { + use frame_metadata::*; + let expected_metadata: RuntimeMetadataLastVersion = RuntimeMetadataLastVersion { + modules: DecodeDifferent::Encode(&[ + ModuleMetadata { + name: DecodeDifferent::Encode("System"), + storage: None, + calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { + name: DecodeDifferent::Encode("noop"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }]))), + event: Some(DecodeDifferent::Encode(FnEncode(|| &[ + EventMetadata { + name: DecodeDifferent::Encode("ExtrinsicSuccess"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + EventMetadata { + name: DecodeDifferent::Encode("ExtrinsicFailed"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + EventMetadata { + name: DecodeDifferent::Encode("Ignore"), + arguments: DecodeDifferent::Encode(&["BlockNumber"]), + documentation: DecodeDifferent::Encode(&[]), + }, + ]))), + constants: DecodeDifferent::Encode(FnEncode(|| &[])), + errors: DecodeDifferent::Encode(FnEncode(|| &[])), + index: 30, + }, + ModuleMetadata { + name: DecodeDifferent::Encode("Module1_1"), + storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { + prefix: DecodeDifferent::Encode("Instance1Module"), + entries: DecodeDifferent::Encode(&[]), + }))), + calls: Some(DecodeDifferent::Encode(FnEncode(|| &[ + FunctionMetadata { + name: DecodeDifferent::Encode("fail"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + ]))), + event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { + name: DecodeDifferent::Encode("A"), + arguments: DecodeDifferent::Encode(&["AccountId"]), + documentation: DecodeDifferent::Encode(&[]), + }]))), + constants: DecodeDifferent::Encode(FnEncode(|| &[])), + errors: DecodeDifferent::Encode(FnEncode(|| &[])), + index: 31, + }, + ModuleMetadata { + name: DecodeDifferent::Encode("Module2"), + storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { + prefix: DecodeDifferent::Encode("Module"), + entries: DecodeDifferent::Encode(&[]), + }))), + calls: Some(DecodeDifferent::Encode(FnEncode(|| &[ + FunctionMetadata { + name: DecodeDifferent::Encode("fail"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + ]))), + event: Some(DecodeDifferent::Encode(FnEncode(|| &[ + EventMetadata { + name: DecodeDifferent::Encode("A"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + ]))), + constants: DecodeDifferent::Encode(FnEncode(|| &[])), + errors: DecodeDifferent::Encode(FnEncode(|| &[])), + index: 32, + }, + ModuleMetadata { + name: DecodeDifferent::Encode("Module1_2"), + storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { + prefix: DecodeDifferent::Encode("Instance2Module"), + entries: DecodeDifferent::Encode(&[]), + }))), + calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { + name: DecodeDifferent::Encode("fail"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }]))), + event: 
Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { + name: DecodeDifferent::Encode("A"), + arguments: DecodeDifferent::Encode(&["AccountId"]), + documentation: DecodeDifferent::Encode(&[]), + }]))), + constants: DecodeDifferent::Encode(FnEncode(|| &[])), + errors: DecodeDifferent::Encode(FnEncode(|| &[])), + index: 33, + }, + ModuleMetadata { + name: DecodeDifferent::Encode("Module1_3"), + storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { + prefix: DecodeDifferent::Encode("Instance3Module"), + entries: DecodeDifferent::Encode(&[]), + }))), + calls: None, + event: None, + constants: DecodeDifferent::Encode(FnEncode(|| &[])), + errors: DecodeDifferent::Encode(FnEncode(|| &[])), + index: 6, + }, + ModuleMetadata { + name: DecodeDifferent::Encode("Module1_4"), + storage: None, + calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { + name: DecodeDifferent::Encode("fail"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }]))), + event: None, + constants: DecodeDifferent::Encode(FnEncode(|| &[])), + errors: DecodeDifferent::Encode(FnEncode(|| &[])), + index: 3, + }, + ModuleMetadata { + name: DecodeDifferent::Encode("Module1_5"), + storage: None, + calls: None, + event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { + name: DecodeDifferent::Encode("A"), + arguments: DecodeDifferent::Encode(&["AccountId"]), + documentation: DecodeDifferent::Encode(&[]), + }]))), + constants: DecodeDifferent::Encode(FnEncode(|| &[])), + errors: DecodeDifferent::Encode(FnEncode(|| &[])), + index: 4, + }, + ModuleMetadata { + name: DecodeDifferent::Encode("Module1_6"), + storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { + prefix: DecodeDifferent::Encode("Instance6Module"), + entries: DecodeDifferent::Encode(&[]), + }))), + calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { + name: DecodeDifferent::Encode("fail"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }]))), + event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { + name: DecodeDifferent::Encode("A"), + arguments: DecodeDifferent::Encode(&["AccountId"]), + documentation: DecodeDifferent::Encode(&[]), + }]))), + constants: DecodeDifferent::Encode(FnEncode(|| &[])), + errors: DecodeDifferent::Encode(FnEncode(|| &[])), + index: 1, + }, + ModuleMetadata { + name: DecodeDifferent::Encode("Module1_7"), + storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { + prefix: DecodeDifferent::Encode("Instance7Module"), + entries: DecodeDifferent::Encode(&[]), + }))), + calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { + name: DecodeDifferent::Encode("fail"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }]))), + event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { + name: DecodeDifferent::Encode("A"), + arguments: DecodeDifferent::Encode(&["AccountId"]), + documentation: DecodeDifferent::Encode(&[]), + }]))), + constants: DecodeDifferent::Encode(FnEncode(|| &[])), + errors: DecodeDifferent::Encode(FnEncode(|| &[])), + index: 2, + }, + ModuleMetadata { + name: DecodeDifferent::Encode("Module1_8"), + storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { + prefix: DecodeDifferent::Encode("Instance8Module"), + entries: DecodeDifferent::Encode(&[]), + }))), + calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { + name: DecodeDifferent::Encode("fail"), + arguments: 
DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }]))), + event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { + name: DecodeDifferent::Encode("A"), + arguments: DecodeDifferent::Encode(&["AccountId"]), + documentation: DecodeDifferent::Encode(&[]), + }]))), + constants: DecodeDifferent::Encode(FnEncode(|| &[])), + errors: DecodeDifferent::Encode(FnEncode(|| &[])), + index: 12, + }, + ModuleMetadata { + name: DecodeDifferent::Encode("Module1_9"), + storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { + prefix: DecodeDifferent::Encode("Instance9Module"), + entries: DecodeDifferent::Encode(&[]), + }))), + calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { + name: DecodeDifferent::Encode("fail"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }]))), + event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { + name: DecodeDifferent::Encode("A"), + arguments: DecodeDifferent::Encode(&["AccountId"]), + documentation: DecodeDifferent::Encode(&[]), + }]))), + constants: DecodeDifferent::Encode(FnEncode(|| &[])), + errors: DecodeDifferent::Encode(FnEncode(|| &[])), + index: 13, + }, + ]), + extrinsic: ExtrinsicMetadata { + version: 4, + signed_extensions: vec![DecodeDifferent::Encode("UnitSignedExtension")], + }, + }; + pretty_assertions::assert_eq!(Runtime::metadata().1, RuntimeMetadata::V12(expected_metadata)); +} + +#[test] +fn pallet_in_runtime_is_correct() { + assert_eq!(PalletInfo::index::().unwrap(), 30); + assert_eq!(PalletInfo::name::().unwrap(), "System"); + + assert_eq!(PalletInfo::index::().unwrap(), 31); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_1"); + + assert_eq!(PalletInfo::index::().unwrap(), 32); + assert_eq!(PalletInfo::name::().unwrap(), "Module2"); + + assert_eq!(PalletInfo::index::().unwrap(), 33); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_2"); + + assert_eq!(PalletInfo::index::().unwrap(), 6); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_3"); + + assert_eq!(PalletInfo::index::().unwrap(), 3); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_4"); + + assert_eq!(PalletInfo::index::().unwrap(), 4); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_5"); + + assert_eq!(PalletInfo::index::().unwrap(), 1); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_6"); + + assert_eq!(PalletInfo::index::().unwrap(), 2); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_7"); + + assert_eq!(PalletInfo::index::().unwrap(), 12); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_8"); + + assert_eq!(PalletInfo::index::().unwrap(), 13); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_9"); +} diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_index.rs b/frame/support/test/tests/construct_runtime_ui/conflicting_index.rs new file mode 100644 index 0000000000000000000000000000000000000000..a48b4bd0970396697751977f57a674f64da8fd77 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_index.rs @@ -0,0 +1,14 @@ +use frame_support::construct_runtime; + +construct_runtime! 
{ + pub enum Runtime where + UncheckedExtrinsic = UncheckedExtrinsic, + Block = Block, + NodeBlock = Block, + { + System: system::{}, + Pallet1: pallet1::{} = 0, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_index.stderr b/frame/support/test/tests/construct_runtime_ui/conflicting_index.stderr new file mode 100644 index 0000000000000000000000000000000000000000..65368666c88fe879c79c1fe30705a4c44e469b4e --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_index.stderr @@ -0,0 +1,11 @@ +error: Module indices are conflicting: Both modules System and Pallet1 are at index 0 + --> $DIR/conflicting_index.rs:9:3 + | +9 | System: system::{}, + | ^^^^^^ + +error: Module indices are conflicting: Both modules System and Pallet1 are at index 0 + --> $DIR/conflicting_index.rs:10:3 + | +10 | Pallet1: pallet1::{} = 0, + | ^^^^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_index_2.rs b/frame/support/test/tests/construct_runtime_ui/conflicting_index_2.rs new file mode 100644 index 0000000000000000000000000000000000000000..c949cb41a23fa43feb97efe080432ada9d19a130 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_index_2.rs @@ -0,0 +1,16 @@ +use frame_support::construct_runtime; + +construct_runtime! { + pub enum Runtime where + UncheckedExtrinsic = UncheckedExtrinsic, + Block = Block, + NodeBlock = Block, + { + System: system::{} = 5, + Pallet1: pallet1::{} = 3, + Pallet2: pallet2::{}, + Pallet3: pallet3::{}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_index_2.stderr b/frame/support/test/tests/construct_runtime_ui/conflicting_index_2.stderr new file mode 100644 index 0000000000000000000000000000000000000000..b792ff5d2a5411b6e1a86578ed32882de1dd2cf0 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_index_2.stderr @@ -0,0 +1,11 @@ +error: Module indices are conflicting: Both modules System and Pallet3 are at index 5 + --> $DIR/conflicting_index_2.rs:9:3 + | +9 | System: system::{} = 5, + | ^^^^^^ + +error: Module indices are conflicting: Both modules System and Pallet3 are at index 5 + --> $DIR/conflicting_index_2.rs:12:3 + | +12 | Pallet3: pallet3::{}, + | ^^^^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/more_than_256_modules.rs b/frame/support/test/tests/construct_runtime_ui/more_than_256_modules.rs new file mode 100644 index 0000000000000000000000000000000000000000..4c8331ae442c87680356e8b577dc3392904bea0d --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/more_than_256_modules.rs @@ -0,0 +1,14 @@ +use frame_support::construct_runtime; + +construct_runtime! 
{ + pub enum Runtime where + UncheckedExtrinsic = UncheckedExtrinsic, + Block = Block, + NodeBlock = Block, + { + System: system::{} = 255, + Pallet256: pallet256::{}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/more_than_256_modules.stderr b/frame/support/test/tests/construct_runtime_ui/more_than_256_modules.stderr new file mode 100644 index 0000000000000000000000000000000000000000..c0ef5c8e60b9ecc53279a12d1b060d6de881d4b6 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/more_than_256_modules.stderr @@ -0,0 +1,5 @@ +error: Module index doesn't fit into u8, index is 256 + --> $DIR/more_than_256_modules.rs:10:3 + | +10 | Pallet256: pallet256::{}, + | ^^^^^^^^^ diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs index 4dbae05f07ff77869d3f7c6367079138d7cbf73f..56eff29c5dc1bf137d3f9941606489f550781320 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs @@ -1,5 +1,5 @@ frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin, system=self { fn integrity_test() {} fn integrity_test() {} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr index d6498961d31c8006ec5f37218f9ff16b19fc80e4..25f3b891d9b47b48aabc204baea95b2b023bbc49 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr @@ -2,7 +2,7 @@ error: `integrity_test` can only be passed once as input. --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn integrity_test() {} 4 | | 5 | | fn integrity_test() {} @@ -16,7 +16,7 @@ error[E0601]: `main` function not found in crate `$CRATE` --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn integrity_test() {} 4 | | 5 | | fn integrity_test() {} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs index 4f05134997e81e380010ff74a462c81f8833ac4d..3e1bc25c8d59c353471806a36985ead744638f83 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs @@ -1,5 +1,5 @@ frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin, system=self { fn on_initialize() -> Weight { 0 } diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr index 8a9f025046b7ee57323afd3c6faa0b2c6d5cda43..34c5ff3f941a179a664777d6e0d2c9ee84ad5e39 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr @@ -2,7 +2,7 @@ error: `on_initialize` can only be passed once as input. --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn on_initialize() -> Weight { 4 | | 0 ... | @@ -16,7 +16,7 @@ error[E0601]: `main` function not found in crate `$CRATE` --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn on_initialize() -> Weight { 4 | | 0 ... | diff --git a/frame/support/test/tests/decl_storage.rs b/frame/support/test/tests/decl_storage.rs index cda1d810d225ca55cfed587cc9322cf47566c999..800ce459fed3513098b4c3c48e9bc18a66513ce5 100644 --- a/frame/support/test/tests/decl_storage.rs +++ b/frame/support/test/tests/decl_storage.rs @@ -25,7 +25,7 @@ mod tests { use codec::{Encode, Decode, EncodeLike}; frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } pub trait Trait { @@ -420,7 +420,7 @@ mod test2 { } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } type PairOf = (T, T); @@ -455,7 +455,7 @@ mod test3 { type BlockNumber; } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } frame_support::decl_storage! { trait Store for Module as Test { @@ -485,7 +485,7 @@ mod test_append_and_len { } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } #[derive(PartialEq, Eq, Clone, Encode, Decode)] @@ -520,7 +520,7 @@ mod test_append_and_len { fn default_for_option() { TestExternalities::default().execute_with(|| { assert_eq!(OptionVec::get(), None); - assert_eq!(JustVec::get(), vec![]); + assert!(JustVec::get().is_empty()); }); } @@ -553,7 +553,7 @@ mod test_append_and_len { let key = JustVec::hashed_key(); // Set it to some invalid value. 
frame_support::storage::unhashed::put_raw(&key, &*b"1"); - assert_eq!(JustVec::get(), Vec::new()); + assert!(JustVec::get().is_empty()); assert_eq!(frame_support::storage::unhashed::get_raw(&key), Some(b"1".to_vec())); JustVec::append(1); @@ -600,7 +600,7 @@ mod test_append_and_len { fn len_works_ignores_default_assignment() { TestExternalities::default().execute_with(|| { // vec - assert_eq!(JustVec::get(), vec![]); + assert!(JustVec::get().is_empty()); assert_eq!(JustVec::decode_len(), None); assert_eq!(JustVecWithDefault::get(), vec![6, 9]); @@ -610,7 +610,7 @@ mod test_append_and_len { assert_eq!(OptionVec::decode_len(), None); // map - assert_eq!(MapVec::get(0), vec![]); + assert!(MapVec::get(0).is_empty()); assert_eq!(MapVec::decode_len(0), None); assert_eq!(MapVecWithDefault::get(0), vec![6, 9]); @@ -620,7 +620,7 @@ mod test_append_and_len { assert_eq!(OptionMapVec::decode_len(0), None); // Double map - assert_eq!(DoubleMapVec::get(0, 0), vec![]); + assert!(DoubleMapVec::get(0, 0).is_empty()); assert_eq!(DoubleMapVec::decode_len(0, 1), None); assert_eq!(DoubleMapVecWithDefault::get(0, 0), vec![6, 9]); diff --git a/frame/support/test/tests/decl_storage_ui/config_duplicate.rs b/frame/support/test/tests/decl_storage_ui/config_duplicate.rs index 4d510da9f893652be1381babff9fe09980baf753..f4f4ad7d48a9709868098967d0b39e602650db9d 100644 --- a/frame/support/test/tests/decl_storage_ui/config_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/config_duplicate.rs @@ -21,7 +21,7 @@ pub trait Trait { } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } frame_support::decl_storage!{ diff --git a/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs b/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs index 49897e625186855c41b1848036116df058a9d146..3caa2d9c33608559c4c21a8fb2e1adcb5fe6072a 100644 --- a/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs @@ -21,7 +21,7 @@ pub trait Trait { } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } frame_support::decl_storage!{ diff --git a/frame/support/test/tests/decl_storage_ui/get_duplicate.rs b/frame/support/test/tests/decl_storage_ui/get_duplicate.rs index 2fa78f4d17c562d6aa3817fb3fd58cb4a1d9d6d2..1c24b3bf28eec850243fe40df72eafa585781f31 100644 --- a/frame/support/test/tests/decl_storage_ui/get_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/get_duplicate.rs @@ -21,7 +21,7 @@ pub trait Trait { } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } frame_support::decl_storage!{ diff --git a/frame/support/test/tests/final_keys.rs b/frame/support/test/tests/final_keys.rs index 34da1752da052f5fbf268e270c11c6c9c7852d31..a9f0cdc8f184bfe266bd35ee2526e10729b1fdb5 100644 --- a/frame/support/test/tests/final_keys.rs +++ b/frame/support/test/tests/final_keys.rs @@ -29,7 +29,7 @@ mod no_instance { } frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } frame_support::decl_storage!{ @@ -50,11 +50,13 @@ mod no_instance { } mod instance { + use super::no_instance; + pub trait Trait: super::no_instance::Trait {} frame_support::decl_module! { pub struct Module, I: Instance = DefaultInstance> - for enum Call where origin: T::Origin {} + for enum Call where origin: T::Origin, system=no_instance {} } frame_support::decl_storage!{ diff --git a/frame/support/test/tests/genesisconfig.rs b/frame/support/test/tests/genesisconfig.rs index 78b841d29501728e8e2eac048e6f2b465f19034b..af8b393800cf970e85d4255d85143d90a6da8def 100644 --- a/frame/support/test/tests/genesisconfig.rs +++ b/frame/support/test/tests/genesisconfig.rs @@ -21,7 +21,7 @@ pub trait Trait { } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } frame_support::decl_storage! { diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 33e8cc1fd6c0fb0c59b8f9b7c08665ddd92e5a46..0e4240528a37941c7021e1b1c0f094018cfa837f 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -184,7 +184,7 @@ mod module3 { } frame_support::decl_module! { - pub struct Module for enum Call where origin: ::Origin {} + pub struct Module for enum Call where origin: ::Origin, system=system {} } } @@ -241,7 +241,7 @@ impl system::Trait for Runtime { type BlockNumber = BlockNumber; type AccountId = AccountId; type Event = Event; - type ModuleToIndex = (); + type PalletInfo = (); type Call = Call; } diff --git a/frame/support/test/tests/issue2219.rs b/frame/support/test/tests/issue2219.rs index 7166f202c7325d2aed5a4a27ce1daaf08ad24398..34310c2f5876f3a1f23bd49f39fdcf7914a1374b 100644 --- a/frame/support/test/tests/issue2219.rs +++ b/frame/support/test/tests/issue2219.rs @@ -84,7 +84,7 @@ mod module { pub trait Trait: system::Trait {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, system=system {} } #[derive(Encode, Decode, Copy, Clone, Serialize, Deserialize)] @@ -164,7 +164,7 @@ impl system::Trait for Runtime { type BlockNumber = BlockNumber; type AccountId = AccountId; type Event = Event; - type ModuleToIndex = (); + type PalletInfo = (); type Call = Call; } diff --git a/frame/support/test/tests/reserved_keyword/on_initialize.rs b/frame/support/test/tests/reserved_keyword/on_initialize.rs index 0751c600cccb2f67ceead54353818a8dd6aba514..db71fe9a1e26a25e5ff825a9396f43086c4e6c08 100644 --- a/frame/support/test/tests/reserved_keyword/on_initialize.rs +++ b/frame/support/test/tests/reserved_keyword/on_initialize.rs @@ -18,7 +18,7 @@ macro_rules! reserved { } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin, system=self { #[weight = 0] fn $reserved(_origin) -> dispatch::DispatchResult { unreachable!() } } diff --git a/frame/support/test/tests/storage_transaction.rs b/frame/support/test/tests/storage_transaction.rs index a9711ec267e54fa19c3dfd52d984c228685711e1..a7e4a75c27fcb98dfddc0a8b31820a71449d0f1e 100644 --- a/frame/support/test/tests/storage_transaction.rs +++ b/frame/support/test/tests/storage_transaction.rs @@ -29,7 +29,7 @@ pub trait Trait { } frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin, system=self { #[weight = 0] #[transactional] fn value_commits(_origin, v: u32) { diff --git a/frame/support/test/tests/system.rs b/frame/support/test/tests/system.rs index 8ca2e97789d549c85f6ce013f1550f8d121eef41..a7d4d43c341a9f2d4c53d4c87262ad4d31259e41 100644 --- a/frame/support/test/tests/system.rs +++ b/frame/support/test/tests/system.rs @@ -27,11 +27,14 @@ pub trait Trait: 'static + Eq + Clone { type AccountId: Encode + EncodeLike + Decode; type Call; type Event: From>; - type ModuleToIndex: frame_support::traits::ModuleToIndex; + type PalletInfo: frame_support::traits::PalletInfo; } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, {} + pub struct Module for enum Call where origin: T::Origin, system=self { + #[weight = 0] + fn noop(origin) {} + } } impl Module { diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index e9a085f85be1816051b5ea721d8f2a3555417d3f..cebf761a907c760f408c1a09d7ff37cf049a428f 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "frame-system" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME system module" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,18 +15,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io", default-features = false } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-version = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/version" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", path = "../../primitives/io", default-features = false } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-version = { version = "2.0.0", default-features = false, path = "../../primitives/version" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] -criterion = "0.2.11" -sp-externalities = { version = "0.8.0-rc5", path = "../../primitives/externalities" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../test-utils/runtime/client" } +criterion = "0.3.3" +sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } [features] default = ["std"] diff --git a/frame/system/README.md b/frame/system/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..adfa7aa35ddda59e18ef32a96c8a114f1c609c7a --- /dev/null +++ b/frame/system/README.md @@ -0,0 +1,75 @@ +# System Module + +The System module provides low-level access to core types and cross-cutting utilities. +It acts as the base layer for other pallets to interact with the Substrate framework components. + +- [`system::Trait`](https://docs.rs/frame-system/latest/frame_system/trait.Trait.html) + +## Overview + +The System module defines the core data types used in a Substrate runtime. +It also provides several utility functions (see [`Module`](https://docs.rs/frame-system/latest/frame_system/struct.Module.html)) for other FRAME pallets. + +In addition, it manages the storage items for extrinsics data, indexes, event records, and digest items, +among other things that support the execution of the current block. + +It also handles low-level tasks like depositing logs, basic setup and teardown of +temporary storage entries, and access to previous block hashes. + +## Interface + +### Dispatchable Functions + +The System module does not implement any dispatchable functions. + +### Public Functions + +See the [`Module`](https://docs.rs/frame-system/latest/frame_system/struct.Module.html) struct for details of publicly available functions. + +### Signed Extensions + +The System module defines the following extensions: + + - [`CheckWeight`]: Checks the weight and length of the block and ensures that it does not + exceed the limits. + - [`CheckNonce`]: Checks the nonce of the transaction. Contains a single payload of type + `T::Index`. + - [`CheckEra`]: Checks the era of the transaction. Contains a single payload of type `Era`. + - [`CheckGenesis`]: Checks the provided genesis hash of the transaction. Must be a part of the + signed payload of the transaction. + - [`CheckSpecVersion`]: Checks that the runtime version is the same as the one used to sign the + transaction. + - [`CheckTxVersion`]: Checks that the transaction version is the same as the one used to sign the + transaction. + +Look up the runtime aggregator file (e.g. `node/runtime`) to see the full list of signed +extensions included in a chain. + +## Usage + +### Prerequisites + +Import the System module and derive your module's configuration trait from the system trait. + +### Example - Get extrinsic count and parent hash for the current block + +```rust +use frame_support::{decl_module, dispatch}; +use frame_system::{self as system, ensure_signed}; + +pub trait Trait: system::Trait {} + +decl_module!
{ + pub struct Module<T: Trait> for enum Call where origin: T::Origin { + #[weight = 0] + pub fn system_module_example(origin) -> dispatch::DispatchResult { + let _sender = ensure_signed(origin)?; + let _extrinsic_count = <system::Module<T>>::extrinsic_count(); + let _parent_hash = <system::Module<T>>::parent_hash(); + Ok(()) + } + } +} +``` + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 1b64b813e59101f8b187697f7c25082be764f585..00c965136c0d0f15c802a6d37668e79ee2eea850 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -81,7 +81,7 @@ impl system::Trait for Runtime { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); diff --git a/frame/system/benchmarking/Cargo.toml b/frame/system/benchmarking/Cargo.toml index e8edab813242c174a3a8faf88e76ec23f3a76763..26b9bd9230e00ae11c5a617931348a4a3f363f10 100644 --- a/frame/system/benchmarking/Cargo.toml +++ b/frame/system/benchmarking/Cargo.toml @@ -1,28 +1,29 @@ [package] name = "frame-system-benchmarking" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME System benchmarking" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../../benchmarking" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../system" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../support" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../benchmarking" } +frame-system = { version = "2.0.0", default-features = false, path = "../../system" } +frame-support = { version = "2.0.0", default-features = false, path = "../../support" } +sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } [dev-dependencies] serde = { version = "1.0.101" } -sp-io ={ version = "2.0.0-rc5", path = "../../../primitives/io" } +sp-io ={ version = "2.0.0", path = "../../../primitives/io" } [features] default = ["std"] diff --git a/frame/system/benchmarking/README.md b/frame/system/benchmarking/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9718db58b37e9ae1c81b5b627fe27e51129cf418 --- /dev/null +++ b/frame/system/benchmarking/README.md @@ -0,0 +1 @@ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs index 049fa5298c677076702256735e6017b994279d0d..653d9536f17972925908290f97ee4d59ee6d6715 100644 ---
a/frame/system/benchmarking/src/lib.rs +++ b/frame/system/benchmarking/src/lib.rs @@ -24,14 +24,13 @@ use sp_std::vec; use sp_std::prelude::*; use sp_core::{ChangesTrieConfiguration, storage::well_known_keys}; use sp_runtime::traits::Hash; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{benchmarks, whitelisted_caller}; +use frame_support::traits::Get; use frame_support::storage::{self, StorageMap}; use frame_system::{Module as System, Call, RawOrigin, DigestItemOf, AccountInfo}; mod mock; -const SEED: u32 = 0; - pub struct Module(System); pub trait Trait: frame_system::Trait {} @@ -39,29 +38,26 @@ benchmarks! { _ { } remark { - // # of Bytes - let b in 0 .. 16_384; + let b in 0 .. T::MaximumBlockLength::get(); let remark_message = vec![1; b as usize]; - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), remark_message) set_heap_pages { - // Heap page size - let i in 0 .. u32::max_value(); - }: _(RawOrigin::Root, i.into()) + }: _(RawOrigin::Root, Default::default()) // `set_code` was not benchmarked because it is pretty hard to come up with a real // Wasm runtime to test the upgrade with. But this is okay because we will make // `set_code` take a full block anyway. + #[extra] set_code_without_checks { - // Version number - let b in 0 .. 16_384; - let code = vec![1; b as usize]; + // Assume Wasm ~4MB + let code = vec![1; 4_000_000 as usize]; }: _(RawOrigin::Root, code) verify { let current_code = storage::unhashed::get_raw(well_known_keys::CODE).ok_or("Code not stored.")?; - assert_eq!(current_code.len(), b as usize); + assert_eq!(current_code.len(), 4_000_000 as usize); } set_changes_trie_config { @@ -141,16 +137,15 @@ benchmarks! { } suicide { - let n in 1 .. 1000; - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); let account_info = AccountInfo:: { - nonce: n.into(), + nonce: 1337.into(), refcount: 0, data: T::AccountData::default() }; frame_system::Account::::insert(&caller, account_info); let new_account_info = System::::account(caller.clone()); - assert_eq!(new_account_info.nonce, n.into()); + assert_eq!(new_account_info.nonce, 1337.into()); }: _(RawOrigin::Signed(caller.clone())) verify { let account_info = System::::account(&caller); diff --git a/frame/system/benchmarking/src/mock.rs b/frame/system/benchmarking/src/mock.rs index 050fd40afe1389576c630570a919e37aeccda5a6..33255d7b50e19470af07808b47d976d536c8799e 100644 --- a/frame/system/benchmarking/src/mock.rs +++ b/frame/system/benchmarking/src/mock.rs @@ -71,7 +71,7 @@ impl frame_system::Trait for Test { type AvailableBlockRatio = (); type MaximumBlockLength = (); type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); diff --git a/frame/system/rpc/runtime-api/Cargo.toml b/frame/system/rpc/runtime-api/Cargo.toml index fa2bd7dd1b859ccb0341c5544936fa36a7dc6cca..d00094364e3e80be0d0bb862556badc45d71c72f 100644 --- a/frame/system/rpc/runtime-api/Cargo.toml +++ b/frame/system/rpc/runtime-api/Cargo.toml @@ -1,18 +1,19 @@ [package] name = "frame-system-rpc-runtime-api" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Runtime API definition required by System RPC extensions." 
+readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/api" } +sp-api = { version = "2.0.0", default-features = false, path = "../../../../primitives/api" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } [features] diff --git a/frame/system/rpc/runtime-api/README.md b/frame/system/rpc/runtime-api/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ab46c22a8be3345b13dc4674f3234cb8f511851b --- /dev/null +++ b/frame/system/rpc/runtime-api/README.md @@ -0,0 +1,7 @@ +Runtime API definition required by System RPC extensions. + +This API should be imported and implemented by the runtime, +of a node that wants to use the custom RPC extension +adding System access methods. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/system/src/default_weights.rs b/frame/system/src/default_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..8a84cb0b7903a4190ea49c2ac3be81dac3892018 --- /dev/null +++ b/frame/system/src/default_weights.rs @@ -0,0 +1,57 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + // WARNING! Some components were not used: ["b"] + fn remark() -> Weight { + (1305000 as Weight) + } + fn set_heap_pages() -> Weight { + (2023000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + // WARNING! 
Some components were not used: ["d"] + fn set_changes_trie_config() -> Weight { + (10026000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn set_storage(i: u32, ) -> Weight { + (0 as Weight) + .saturating_add((656000 as Weight).saturating_mul(i as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } + fn kill_storage(i: u32, ) -> Weight { + (4327000 as Weight) + .saturating_add((478000 as Weight).saturating_mul(i as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } + fn kill_prefix(p: u32, ) -> Weight { + (8349000 as Weight) + .saturating_add((838000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + } + fn suicide() -> Weight { + (29247000 as Weight) + } +} diff --git a/frame/system/src/extensions/check_nonce.rs b/frame/system/src/extensions/check_nonce.rs index 1af3a1210aaf417c96019f03b43944184d435e22..e7316457aaffcb1360896c840e45b3eeb198e713 100644 --- a/frame/system/src/extensions/check_nonce.rs +++ b/frame/system/src/extensions/check_nonce.rs @@ -25,12 +25,15 @@ use sp_runtime::{ traits::{SignedExtension, DispatchInfoOf, Dispatchable, One}, transaction_validity::{ ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity, - TransactionLongevity, TransactionPriority, + TransactionLongevity, }, }; use sp_std::vec; /// Nonce check and increment to give replay protection for transactions. +/// +/// Note that this does not set any priority by default. Make sure that AT LEAST one of the signed +/// extension sets some kind of priority upon validating transactions. #[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct CheckNonce(#[codec(compact)] T::Index); @@ -90,7 +93,7 @@ impl SignedExtension for CheckNonce where &self, who: &Self::AccountId, _call: &Self::Call, - info: &DispatchInfoOf, + _info: &DispatchInfoOf, _len: usize, ) -> TransactionValidity { // check index @@ -107,7 +110,7 @@ impl SignedExtension for CheckNonce where }; Ok(ValidTransaction { - priority: info.weight as TransactionPriority, + priority: 0, requires, provides, longevity: TransactionLongevity::max_value(), diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index 1395aa87efbcfb54a1a9e9e8422aeb222318821c..092ac59da97c8a66106b0640ae113d40caaa83da 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -27,7 +27,7 @@ use sp_runtime::{ }; use frame_support::{ traits::{Get}, - weights::{PostDispatchInfo, DispatchInfo, DispatchClass}, + weights::{PostDispatchInfo, DispatchInfo, DispatchClass, priority::FrameTransactionPriority}, StorageValue, }; @@ -157,12 +157,18 @@ impl CheckWeight where } /// get the priority of an extrinsic denoted by `info`. + /// + /// Operational transaction will be given a fixed initial amount to be fairly distinguished from + /// the normal ones. fn get_priority(info: &DispatchInfoOf) -> TransactionPriority { match info.class { - DispatchClass::Normal => info.weight.into(), - // Don't use up the whole priority space, to allow things like `tip` - // to be taken into account as well. - DispatchClass::Operational => TransactionPriority::max_value() / 2, + // Normal transaction. 
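+ // Roughly speaking, the two arms below split the priority space into two tiers. Assuming
+ // normal priorities stay below `frame_support::weights::priority::LIMIT`, an operational
+ // dispatch of weight 100 validates at `LIMIT + 100` (see the updated
+ // `signed_ext_check_weight_works` test further down), so any operational transaction
+ // outranks any normal one, while a `tip` can still reorder transactions within a class.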
+ DispatchClass::Normal => + FrameTransactionPriority::Normal(info.weight.into()).into(), + // Don't use up the whole priority space, to allow things like `tip` to be taken into + // account as well. + DispatchClass::Operational => + FrameTransactionPriority::Operational(info.weight.into()).into(), // Mandatory extrinsics are only for inherents; never transactions. DispatchClass::Mandatory => TransactionPriority::min_value(), } @@ -496,7 +502,7 @@ mod tests { } #[test] - fn signed_ext() { + fn signed_ext_check_weight_works() { new_test_ext().execute_with(|| { let normal = DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; let op = DispatchInfo { weight: 100, class: DispatchClass::Operational, pays_fee: Pays::Yes }; @@ -512,7 +518,7 @@ mod tests { .validate(&1, CALL, &op, len) .unwrap() .priority; - assert_eq!(priority, u64::max_value() / 2); + assert_eq!(priority, frame_support::weights::priority::LIMIT + 100); }) } diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 30d5d019fc5fff51018e809ac3ac80c6a6ee06b7..9518317f7bd54cf1de95ef6c65757a0e43bd2c95 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -117,7 +117,7 @@ use frame_support::{ decl_module, decl_event, decl_storage, decl_error, Parameter, ensure, debug, storage, traits::{ - Contains, Get, ModuleToIndex, OnNewAccount, OnKilledAccount, IsDeadAccount, Happened, + Contains, Get, PalletInfo, OnNewAccount, OnKilledAccount, IsDeadAccount, Happened, StoredMap, EnsureOrigin, OriginTrait, Filter, }, weights::{ @@ -139,6 +139,7 @@ mod extensions; mod weights; #[cfg(test)] mod tests; +mod default_weights; pub use extensions::{ check_mortality::CheckMortality, check_genesis::CheckGenesis, check_nonce::CheckNonce, @@ -159,25 +160,13 @@ pub fn extrinsics_data_root(xts: Vec>) -> H::Output { } pub trait WeightInfo { - fn remark(b: u32, ) -> Weight; - fn set_heap_pages(i: u32, ) -> Weight; - fn set_code_without_checks(b: u32, ) -> Weight; - fn set_changes_trie_config(d: u32, ) -> Weight; + fn remark() -> Weight; + fn set_heap_pages() -> Weight; + fn set_changes_trie_config() -> Weight; fn set_storage(i: u32, ) -> Weight; fn kill_storage(i: u32, ) -> Weight; fn kill_prefix(p: u32, ) -> Weight; - fn suicide(n: u32, ) -> Weight; -} - -impl WeightInfo for () { - fn remark(_b: u32, ) -> Weight { 1_000_000_000 } - fn set_heap_pages(_i: u32, ) -> Weight { 1_000_000_000 } - fn set_code_without_checks(_b: u32, ) -> Weight { 1_000_000_000 } - fn set_changes_trie_config(_d: u32, ) -> Weight { 1_000_000_000 } - fn set_storage(_i: u32, ) -> Weight { 1_000_000_000 } - fn kill_storage(_i: u32, ) -> Weight { 1_000_000_000 } - fn kill_prefix(_p: u32, ) -> Weight { 1_000_000_000 } - fn suicide(_n: u32, ) -> Weight { 1_000_000_000 } + fn suicide() -> Weight; } pub trait Trait: 'static + Eq + Clone { @@ -267,11 +256,13 @@ pub trait Trait: 'static + Eq + Clone { /// Get the chain's current version. type Version: Get; - /// Convert a module to its index in the runtime. + /// Provides information about the pallet setup in the runtime. /// - /// Expects the `ModuleToIndex` type that is being generated by `construct_runtime!` in the - /// runtime. For tests it is okay to use `()` as type (returns `0` for each input). - type ModuleToIndex: ModuleToIndex; + /// Expects the `PalletInfo` type that is being generated by `construct_runtime!` in the + /// runtime. + /// + /// For tests it is okay to use `()` as type, however it will provide "useless" data. 
+ type PalletInfo: PalletInfo; /// Data to be associated with an account (other than nonce/transaction counter, which this /// module does regardless). @@ -365,7 +356,7 @@ fn hash69 + Default>() -> T { type EventIndex = u32; /// Type used to encode the number of references an account has. -pub type RefCount = u8; +pub type RefCount = u32; /// Information of an account. #[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] @@ -467,6 +458,9 @@ decl_storage! { /// Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened. pub LastRuntimeUpgrade build(|_| Some(LastRuntimeUpgradeInfo::from(T::Version::get()))): Option; + /// True if we have upgraded so that `type RefCount` is `u32`. False (default) if not. + UpgradedToU32RefCount build(|_| true): bool; + /// The execution phase of the block. ExecutionPhase: Option; } @@ -494,15 +488,15 @@ decl_storage! { decl_event!( /// Event for the System module. pub enum Event where AccountId = ::AccountId { - /// An extrinsic completed successfully. [info] + /// An extrinsic completed successfully. \[info\] ExtrinsicSuccess(DispatchInfo), - /// An extrinsic failed. [error, info] + /// An extrinsic failed. \[error, info\] ExtrinsicFailed(DispatchError, DispatchInfo), /// `:code` was updated. CodeUpdated, - /// A new [account] was created. + /// A new \[account\] was created. NewAccount(AccountId), - /// An [account] was reaped. + /// An \[account\] was reaped. KilledAccount(AccountId), } ); @@ -528,7 +522,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin, system=self { type Error = Error; /// The maximum number of blocks to allow in mortal eras. @@ -549,6 +543,18 @@ decl_module! { /// The maximum length of a block (in bytes). const MaximumBlockLength: u32 = T::MaximumBlockLength::get(); + fn on_runtime_upgrade() -> frame_support::weights::Weight { + if !UpgradedToU32RefCount::get() { + Account::::translate::<(T::Index, u8, T::AccountData), _>(|_key, (nonce, rc, data)| + Some(AccountInfo { nonce, refcount: rc as RefCount, data }) + ); + UpgradedToU32RefCount::put(true); + T::MaximumBlockWeight::get() + } else { + 0 + } + } + /// A dispatch that will fill the block weight up to the given ratio. // TODO: This should only be available for testing, rather than in general usage, but // that's not possible at present (since it's within the decl_module macro). @@ -564,7 +570,7 @@ decl_module! { /// - Base Weight: 0.665 µs, independent of remark length. /// - No DB operations. /// # - #[weight = 700_000] + #[weight = T::SystemWeightInfo::remark()] fn remark(origin, _remark: Vec) { ensure_signed(origin)?; } @@ -577,7 +583,7 @@ decl_module! { /// - Base Weight: 1.405 µs /// - 1 write to HEAP_PAGES /// # - #[weight = (T::DbWeight::get().writes(1) + 1_500_000, DispatchClass::Operational)] + #[weight = (T::SystemWeightInfo::set_heap_pages(), DispatchClass::Operational)] fn set_heap_pages(origin, pages: u64) { ensure_root(origin)?; storage::unhashed::put_raw(well_known_keys::HEAP_PAGES, &pages.encode()); @@ -627,7 +633,7 @@ decl_module! 
{ /// - DB Weight: /// - Writes: Changes Trie, System Digest /// # - #[weight = (T::DbWeight::get().writes(2) + 10_000_000, DispatchClass::Operational)] + #[weight = (T::SystemWeightInfo::set_changes_trie_config(), DispatchClass::Operational)] pub fn set_changes_trie_config(origin, changes_trie_config: Option) { ensure_root(origin)?; match changes_trie_config.clone() { @@ -653,8 +659,7 @@ decl_module! { /// - Writes: Number of items /// # #[weight = ( - T::DbWeight::get().writes(items.len() as Weight) - .saturating_add((items.len() as Weight).saturating_mul(600_000)), + T::SystemWeightInfo::set_storage(items.len() as u32), DispatchClass::Operational, )] fn set_storage(origin, items: Vec) { @@ -673,8 +678,7 @@ decl_module! { /// - Writes: Number of items /// # #[weight = ( - T::DbWeight::get().writes(keys.len() as Weight) - .saturating_add((keys.len() as Weight).saturating_mul(400_000)), + T::SystemWeightInfo::kill_storage(keys.len() as u32), DispatchClass::Operational, )] fn kill_storage(origin, keys: Vec) { @@ -696,8 +700,7 @@ decl_module! { /// - Writes: Number of subkeys + 1 /// # #[weight = ( - T::DbWeight::get().writes(Weight::from(*_subkeys) + 1) - .saturating_add((Weight::from(*_subkeys) + 1).saturating_mul(850_000)), + T::SystemWeightInfo::kill_prefix(_subkeys.saturating_add(1)), DispatchClass::Operational, )] fn kill_prefix(origin, prefix: Key, _subkeys: u32) { @@ -715,7 +718,7 @@ decl_module! { /// Base Weight: 8.626 µs /// No DB Read or Write operations because caller is already in overlay /// # - #[weight = (10_000_000, DispatchClass::Operational)] + #[weight = (T::SystemWeightInfo::suicide(), DispatchClass::Operational)] pub fn suicide(origin) { let who = ensure_signed(origin)?; let account = Account::::get(&who); diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index d7c4d1c9e7b20d6717e1e940c1ef1d391c01a460..cd67a74114073d38191fe9dd343c064e7b667d78 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -101,7 +101,7 @@ impl Trait for Test { type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; type Version = Version; - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = u32; type OnNewAccount = (); type OnKilledAccount = RecordKilled; diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index bb9a3266e2d345d2236ee9776e12a4beb7f5452a..5a99c5d02c5afc884b96832e53017e1bf085fd59 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-timestamp" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME Timestamp Module" documentation = "https://docs.rs/pallet-timestamp" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,19 +17,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io", optional = true } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-inherents = { version = "2.0.0-rc5", default-features 
= false, path = "../../primitives/inherents" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/timestamp" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io", optional = true } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-timestamp = { version = "2.0.0", default-features = false, path = "../../primitives/timestamp" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-io ={ version = "2.0.0-rc5", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } +sp-io ={ version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/timestamp/README.md b/frame/timestamp/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5610caca4da5115a9ec6340ec58d90d8742eed58 --- /dev/null +++ b/frame/timestamp/README.md @@ -0,0 +1,74 @@ +# Timestamp Module + +The Timestamp module provides functionality to get and set the on-chain time. + +- [`timestamp::Trait`](https://docs.rs/pallet-timestamppallet-timestamp/latest/pallet_timestamp/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-timestamppallet-timestamp/latest/pallet_timestamp/enum.Call.html) +- [`Module`](https://docs.rs/pallet-timestamppallet-timestamp/latest/pallet_timestamp/struct.Module.html) + +## Overview + +The Timestamp module allows the validators to set and validate a timestamp with each block. + +It uses inherents for timestamp data, which is provided by the block author and validated/verified +by other validators. The timestamp can be set only once per block and must be set each block. +There could be a constraint on how much time must pass before setting the new timestamp. + +**NOTE:** The Timestamp module is the recommended way to query the on-chain time instead of using +an approach based on block numbers. The block number based time measurement can cause issues +because of cumulative calculation errors and hence should be avoided. + +## Interface + +### Dispatchable Functions + +* `set` - Sets the current time. + +### Public functions + +* `get` - Gets the current time for the current block. If this function is called prior to +setting the timestamp, it will return the timestamp of the previous block. + +### Trait Getters + +* `MinimumPeriod` - Gets the minimum (and advised) period between blocks for the chain. + +## Usage + +The following example shows how to use the Timestamp module in your custom module to query the current timestamp. + +### Prerequisites + +Import the Timestamp module into your custom module and derive the module configuration +trait from the timestamp trait. 
+ +### Get current timestamp + +```rust +use frame_support::{decl_module, dispatch}; +use frame_system::ensure_signed; + +pub trait Trait: timestamp::Trait {} + +decl_module! { + pub struct Module for enum Call where origin: T::Origin { + #[weight = 0] + pub fn get_time(origin) -> dispatch::DispatchResult { + let _sender = ensure_signed(origin)?; + let _now = >::get(); + Ok(()) + } + } +} +``` + +### Example from the FRAME + +The [Session module](https://github.com/paritytech/substrate/blob/master/frame/session/src/lib.rs) uses +the Timestamp module for session management. + +## Related Modules + +* [Session](https://docs.rs/pallet-timestamppallet-session/latest/pallet_session/) + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/timestamp/src/benchmarking.rs b/frame/timestamp/src/benchmarking.rs index 9b1c976229e665e330e62788d5a338589b4427c6..1cd0f15ca01b936956694f1ade3e56c0b6f4859e 100644 --- a/frame/timestamp/src/benchmarking.rs +++ b/frame/timestamp/src/benchmarking.rs @@ -23,7 +23,7 @@ use super::*; use sp_std::prelude::*; use frame_system::RawOrigin; use frame_support::{ensure, traits::OnFinalize}; -use frame_benchmarking::benchmarks; +use frame_benchmarking::{benchmarks, TrackedStorageKey}; use crate::Module as Timestamp; @@ -34,8 +34,14 @@ benchmarks! { set { let t in 1 .. MAX_TIME; + // Ignore write to `DidUpdate` since it transient. + let did_update_key = crate::DidUpdate::hashed_key().to_vec(); + frame_benchmarking::benchmarking::add_to_whitelist(TrackedStorageKey { + key: did_update_key, + has_been_read: false, + has_been_written: true, + }); }: _(RawOrigin::None, t.into()) - verify { ensure!(Timestamp::::now() == t.into(), "Time was not set."); } @@ -44,8 +50,10 @@ benchmarks! { let t in 1 .. MAX_TIME; Timestamp::::set(RawOrigin::None.into(), t.into())?; ensure!(DidUpdate::exists(), "Time was not set."); + // Ignore read/write to `DidUpdate` since it is transient. + let did_update_key = crate::DidUpdate::hashed_key().to_vec(); + frame_benchmarking::benchmarking::add_to_whitelist(did_update_key.into()); }: { Timestamp::::on_finalize(t.into()); } - verify { ensure!(!DidUpdate::exists(), "Time was not removed."); } diff --git a/frame/timestamp/src/default_weights.rs b/frame/timestamp/src/default_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..726b3444e2532eec6b1cc9df9305ea4f95eb3548 --- /dev/null +++ b/frame/timestamp/src/default_weights.rs @@ -0,0 +1,35 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + // WARNING! 
Some components were not used: ["t"] + fn set() -> Weight { + (9133000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + // WARNING! Some components were not used: ["t"] + fn on_finalize() -> Weight { + (5915000 as Weight) + } +} diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index 1177165abed8c39e5230176fff844b951eec661c..959382c07b610e28ae29314b8c5299d45555532d 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -93,6 +93,7 @@ #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; +mod default_weights; use sp_std::{result, cmp}; use sp_inherents::{ProvideInherent, InherentData, InherentIdentifier}; @@ -116,13 +117,8 @@ use sp_timestamp::{ }; pub trait WeightInfo { - fn set(t: u32, ) -> Weight; - fn on_finalize(t: u32, ) -> Weight; -} - -impl WeightInfo for () { - fn set(_t: u32, ) -> Weight { 1_000_000_000 } - fn on_finalize(_t: u32, ) -> Weight { 1_000_000_000 } + fn set() -> Weight; + fn on_finalize() -> Weight; } /// The module configuration trait @@ -166,12 +162,9 @@ decl_module! { /// - `O(T)` where `T` complexity of `on_timestamp_set` /// - 1 storage read and 1 storage mutation (codec `O(1)`). (because of `DidUpdate::take` in `on_finalize`) /// - 1 event handler `on_timestamp_set` `O(T)`. - /// - Benchmark: 7.678 (min squares analysis) - /// - NOTE: This benchmark was done for a runtime with insignificant `on_timestamp_set` handlers. - /// New benchmarking is needed when adding new handlers. /// # #[weight = ( - T::DbWeight::get().reads_writes(2, 1) + 8_000_000, + T::WeightInfo::set(), DispatchClass::Mandatory )] fn set(origin, #[compact] now: T::Moment) { @@ -191,13 +184,12 @@ decl_module! { /// dummy `on_initialize` to return the weight used in `on_finalize`. fn on_initialize() -> Weight { // weight of `on_finalize` - 5_000_000 + T::WeightInfo::on_finalize() } /// # /// - `O(1)` /// - 1 storage deletion (codec `O(1)`). 
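The generated weight functions above combine a fixed execution cost with per-operation database costs taken from `RocksDbWeight`. A minimal standalone sketch of how `set()` expands numerically, using assumed per-read and per-write costs purely for illustration rather than the runtime's real constants:

```rust
// Standalone sketch of the generated `set()` weight formula. The per-read and
// per-write costs below are assumptions for illustration, not `RocksDbWeight`.
type Weight = u64;

const ASSUMED_READ_WEIGHT: Weight = 25_000_000; // assumed cost of one DB read
const ASSUMED_WRITE_WEIGHT: Weight = 100_000_000; // assumed cost of one DB write

fn timestamp_set_weight() -> Weight {
    // Mirrors the generated formula: fixed cost + 2 reads + 1 write.
    9_133_000u64
        .saturating_add(ASSUMED_READ_WEIGHT.saturating_mul(2))
        .saturating_add(ASSUMED_WRITE_WEIGHT)
}

fn main() {
    // With the assumed constants: 9_133_000 + 50_000_000 + 100_000_000.
    println!("timestamp::set weight = {}", timestamp_set_weight());
}
```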
- /// - Benchmark: 4.928 µs (min squares analysis) /// # fn on_finalize() { assert!(::DidUpdate::take(), "Timestamp must be updated once in the block"); @@ -347,7 +339,7 @@ mod tests { type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index e48488a7bcd786ac915d2ed13419878b1128c267..1fa4521900421477c8c7e5941d883d6156d8f418 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-transaction-payment" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet to manage transaction payments" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,18 +15,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc5", default-features = false, path = "./rpc/runtime-api" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "./rpc/runtime-api" } smallvec = "1.4.1" -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io", default-features = false } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core", default-features = false } +sp-io = { version = "2.0.0", path = "../../primitives/io", default-features = false } +sp-core = { version = "2.0.0", path = "../../primitives/core", default-features = false } [dev-dependencies] -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -sp-storage = { version = "2.0.0-rc5", path = "../../primitives/storage" } +pallet-balances = { version = "2.0.0", path = "../balances" } +sp-storage = { version = "2.0.0", path = "../../primitives/storage" } [features] default = ["std"] diff --git a/frame/transaction-payment/README.md b/frame/transaction-payment/README.md new file mode 100644 index 0000000000000000000000000000000000000000..10ad9579e92b7e66b8a87af0e9962cfc4ee77ae8 --- /dev/null +++ b/frame/transaction-payment/README.md @@ -0,0 +1,16 @@ +# Transaction Payment Module + +This module provides the basic logic needed to pay the absolute minimum amount needed for a +transaction to be included. This includes: + - _weight fee_: A fee proportional to amount of weight a transaction consumes. 
+ - _length fee_: A fee proportional to the encoded length of the transaction. + - _tip_: An optional tip. Tip increases the priority of the transaction, giving it a higher + chance to be included by the transaction queue. + +Additionally, this module allows one to configure: + - The mapping between one unit of weight to one unit of fee via [`Trait::WeightToFee`]. + - A means of updating the fee for the next block, via defining a multiplier, based on the + final state of the chain at the end of the previous block. This can be configured via + [`Trait::FeeMultiplierUpdate`] + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index 280a4dc490eee6bdf3b222203184fcf644de833e..26f073e60237f7f42df5a6d3a5cd5deb1c4b0371 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -1,25 +1,26 @@ [package] name = "pallet-transaction-payment-rpc" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "RPC interface for the transaction payment module." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1" } -jsonrpc-core = "14.2.0" -jsonrpc-core-client = "14.2.0" -jsonrpc-derive = "14.2.1" -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sp-rpc = { version = "2.0.0-rc5", path = "../../../primitives/rpc" } +jsonrpc-core = "15.0.0" +jsonrpc-core-client = "15.0.0" +jsonrpc-derive = "15.0.0" +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-rpc = { version = "2.0.0", path = "../../../primitives/rpc" } serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-rc5", path = "./runtime-api" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0", path = "../../../primitives/api" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", path = "./runtime-api" } diff --git a/frame/transaction-payment/rpc/README.md b/frame/transaction-payment/rpc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..21a8a7d37cae0a14d68052b6a07482e731e3b784 --- /dev/null +++ b/frame/transaction-payment/rpc/README.md @@ -0,0 +1,3 @@ +RPC interface for the transaction payment module. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/frame/transaction-payment/rpc/runtime-api/Cargo.toml index dacc7bc642372045e925d08d0b35fb119244cd8a..881c4330eb9a40babf17170187576ec50bea71f1 100644 --- a/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -1,23 +1,24 @@ [package] name = "pallet-transaction-payment-rpc-runtime-api" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "RPC runtime API for transaction payment FRAME pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/api" } +sp-api = { version = "2.0.0", default-features = false, path = "../../../../primitives/api" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../../support" } +sp-std = { version = "2.0.0", default-features = false, path = "../../../../primitives/std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../../../primitives/runtime" } +frame-support = { version = "2.0.0", default-features = false, path = "../../../support" } [dev-dependencies] serde_json = "1.0.41" diff --git a/frame/transaction-payment/rpc/runtime-api/README.md b/frame/transaction-payment/rpc/runtime-api/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e453d9a3b7c8a0ff91baa003a5d3656293b28f5b --- /dev/null +++ b/frame/transaction-payment/rpc/runtime-api/README.md @@ -0,0 +1,3 @@ +Runtime API definition for transaction payment module. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 244b4280ade08f8daa638d599e8e038b34cbc08f..09caae54cf348edc770969598f6e9721d2380acc 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -467,6 +467,23 @@ impl ChargeTransactionPayment where Err(_) => Err(InvalidTransaction::Payment.into()), } } + + /// Get an appropriate priority for a transaction with the given length and info. + /// + /// This will try and optimise the `fee/weight` `fee/length`, whichever is consuming more of the + /// maximum corresponding limit. + /// + /// For example, if a transaction consumed 1/4th of the block length and half of the weight, its + /// final priority is `fee * min(2, 4) = fee * 2`. If it consumed `1/4th` of the block length + /// and the entire block weight `(1/1)`, its priority is `fee * min(1, 4) = fee * 1`. This means + /// that the transaction which consumes more resources (either length or weight) with the same + /// `fee` ends up having lower priority. 
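A minimal standalone sketch of the priority rule described above, reproducing the two worked cases from the comment; the block limits are assumed example values, not the runtime's configuration:

```rust
// Standalone illustration of priority = fee * min(max_weight / weight, max_len / len).
// The limits passed in `main` are arbitrary example values.
fn priority(fee: u64, weight: u64, len: u64, max_weight: u64, max_len: u64) -> u64 {
    let weight_saturation = max_weight / weight.max(1);
    let len_saturation = max_len / len.max(1);
    fee.saturating_mul(weight_saturation.min(len_saturation))
}

fn main() {
    let (max_weight, max_len) = (2_000_000_000u64, 4_000_000u64);
    // Consumes 1/4 of the length and 1/2 of the weight: priority = fee * min(2, 4) = fee * 2.
    assert_eq!(priority(100, max_weight / 2, max_len / 4, max_weight, max_len), 200);
    // Consumes 1/4 of the length and the entire weight: priority = fee * min(1, 4) = fee * 1.
    assert_eq!(priority(100, max_weight, max_len / 4, max_weight, max_len), 100);
}
```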
+ fn get_priority(len: usize, info: &DispatchInfoOf, final_fee: BalanceOf) -> TransactionPriority { + let weight_saturation = T::MaximumBlockWeight::get() / info.weight.max(1); + let len_saturation = T::MaximumBlockLength::get() as u64 / (len as u64).max(1); + let coefficient: BalanceOf = weight_saturation.min(len_saturation).saturated_into::>(); + final_fee.saturating_mul(coefficient).saturated_into::() + } } impl sp_std::fmt::Debug for ChargeTransactionPayment { @@ -499,12 +516,10 @@ impl SignedExtension for ChargeTransactionPayment whe len: usize, ) -> TransactionValidity { let (fee, _) = self.withdraw_fee(who, info, len)?; - - let mut r = ValidTransaction::default(); - // NOTE: we probably want to maximize the _fee (of any type) per weight unit_ here, which - // will be a bit more than setting the priority to tip. For now, this is enough. - r.priority = fee.saturated_into::(); - Ok(r) + Ok(ValidTransaction { + priority: Self::get_priority(len, info, fee), + ..Default::default() + }) } fn pre_dispatch( @@ -639,7 +654,7 @@ mod tests { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -656,6 +671,7 @@ mod tests { type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type MaxLocks = (); type WeightInfo = (); } thread_local! { diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index a2b316a6c6a6ff6a60af01ed8d5ffc289b8574d1..fd2d103e9f3350b22c3d55ea40cb37e18b1d5d58 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-treasury" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet to manage treasury" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,18 +15,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -pallet-balances = { version = "2.0.0-rc5", default-features = false, path = "../balances" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-io 
={ version = "2.0.0-rc5", path = "../../primitives/io" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-storage = { version = "2.0.0-rc5", path = "../../primitives/storage" } +sp-io ={ version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-storage = { version = "2.0.0", path = "../../primitives/storage" } [features] default = ["std"] @@ -41,4 +42,5 @@ std = [ runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", ] diff --git a/frame/treasury/README.md b/frame/treasury/README.md new file mode 100644 index 0000000000000000000000000000000000000000..424b8e0eedf9952717ad1d2c75c3b7e01e5dcc70 --- /dev/null +++ b/frame/treasury/README.md @@ -0,0 +1,118 @@ +# Treasury Module + +The Treasury module provides a "pot" of funds that can be managed by stakeholders in the +system and a structure for making spending proposals from this pot. + +- [`treasury::Trait`](https://docs.rs/pallet-treasury/latest/pallet_treasury/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-treasury/latest/pallet_treasury/enum.Call.html) + +## Overview + +The Treasury Module itself provides the pot to store funds, and a means for stakeholders to +propose, approve, and deny expenditures. The chain will need to provide a method (e.g. +inflation, fees) for collecting funds. + +By way of example, the Council could vote to fund the Treasury with a portion of the block +reward and use the funds to pay developers. + +### Tipping + +A separate subsystem exists to allow for an agile "tipping" process, whereby a reward may be +given without first having a pre-determined stakeholder group come to consensus on how much +should be paid. + +A group of `Tippers` is determined through the config `Trait`. After half of these have declared +some amount that they believe a particular reported reason deserves, then a countdown period is +entered where any remaining members can declare their tip amounts also. After the close of the +countdown period, the median of all declared tips is paid to the reported beneficiary, along +with any finders fee, in case of a public (and bonded) original report. + +### Bounty + +A Bounty Spending is a reward for a specified body of work - or specified set of objectives - that +needs to be executed for a predefined Treasury amount to be paid out. A curator is assigned after +the bounty is approved and funded by Council, to be delegated +with the responsibility of assigning a payout address once the specified set of objectives is completed. + +After the Council has activated a bounty, it delegates the work that requires expertise to a curator +in exchange of a deposit. Once the curator accepts the bounty, they +get to close the Active bounty. Closing the Active bounty enacts a delayed payout to the payout +address, the curator fee and the return of the curator deposit. The +delay allows for intervention through regular democracy. The Council gets to unassign the curator, +resulting in a new curator election. The Council also gets to cancel +the bounty if deemed necessary before assigning a curator or once the bounty is active or payout +is pending, resulting in the slash of the curator's deposit. + + +### Terminology + +- **Proposal:** A suggestion to allocate funds from the pot to a beneficiary. +- **Beneficiary:** An account who will receive the funds from a proposal iff +the proposal is approved. 
+- **Deposit:** Funds that a proposer must lock when making a proposal. The +deposit will be returned or slashed if the proposal is approved or rejected +respectively. +- **Pot:** Unspent funds accumulated by the treasury module. + +Tipping protocol: +- **Tipping:** The process of gathering declarations of amounts to tip and taking the median + amount to be transferred from the treasury to a beneficiary account. +- **Tip Reason:** The reason for a tip; generally a URL which embodies or explains why a + particular individual (identified by an account ID) is worthy of a recognition by the + treasury. +- **Finder:** The original public reporter of some reason for tipping. +- **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip, + rather than the main beneficiary. + +Bounty: +- **Bounty spending proposal:** A proposal to reward a predefined body of work upon completion by +the Treasury. +- **Proposer:** An account proposing a bounty spending. +- **Curator:** An account managing the bounty and assigning a payout address receiving the reward +for the completion of work. +- **Deposit:** The amount held on deposit for placing a bounty proposal plus the amount held on +deposit per byte within the bounty description. +- **Curator deposit:** The payment from a candidate willing to curate an approved bounty. The deposit +is returned when/if the bounty is completed. +- **Bounty value:** The total amount that should be paid to the Payout Address if the bounty is +rewarded. +- **Payout address:** The account to which the total or part of the bounty is assigned to. +- **Payout Delay:** The delay period for which a bounty beneficiary needs to wait before claiming. +- **Curator fee:** The reserved upfront payment for a curator for work related to the bounty. + +## Interface + +### Dispatchable Functions + +General spending/proposal protocol: +- `propose_spend` - Make a spending proposal and stake the required deposit. +- `set_pot` - Set the spendable balance of funds. +- `configure` - Configure the module's proposal requirements. +- `reject_proposal` - Reject a proposal, slashing the deposit. +- `approve_proposal` - Accept the proposal, returning the deposit. + +Tipping protocol: +- `report_awesome` - Report something worthy of a tip and register for a finders fee. +- `retract_tip` - Retract a previous (finders fee registered) report. +- `tip_new` - Report an item worthy of a tip and declare a specific amount to tip. +- `tip` - Declare or redeclare an amount to tip for a particular reason. +- `close_tip` - Close and pay out a tip. + +Bounty protocol: +- `propose_bounty` - Propose a specific treasury amount to be earmarked for a predefined set of +tasks and stake the required deposit. +- `approve_bounty` - Accept a specific treasury amount to be earmarked for a predefined body of work. +- `propose_curator` - Assign an account to a bounty as candidate curator. +- `accept_curator` - Accept a bounty assignment from the Council, setting a curator deposit. +- `extend_bounty_expiry` - Extend the expiry block number of the bounty and stay active. +- `award_bounty` - Close and pay out the specified amount for the completed work. +- `claim_bounty` - Claim a specific bounty amount from the Payout Address. +- `unassign_curator` - Unassign an accepted curator from a specific earmark. +- `close_bounty` - Cancel the earmark for a specific treasury amount and close the bounty. 
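The tipping flow listed above pays out the median of the declared tip amounts, with the finders fee (for a public, bonded report) going to the reporter instead of the beneficiary. A minimal standalone sketch of that split, assuming a 20% finders-fee rate used purely for illustration:

```rust
// Simplified sketch of the tip payout rule: median of declared amounts, with a
// proportion paid to the finder. The 20% rate and amounts are example values only.
fn split_tip_payout(mut declared: Vec<u64>, finders_fee_percent: u64) -> (u64, u64) {
    declared.sort();
    // Middle declared amount (one of the two middles when the count is even; a simplification).
    let median = declared[declared.len() / 2];
    let finders_fee = median * finders_fee_percent / 100;
    // (amount paid to the finder, amount paid to the beneficiary)
    (finders_fee, median - finders_fee)
}

fn main() {
    let (to_finder, to_beneficiary) = split_tip_payout(vec![10, 50, 30], 20);
    assert_eq!((to_finder, to_beneficiary), (6, 24)); // median 30, 20% finders fee
}
```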
+ + +## GenesisConfig + +The Treasury module depends on the [`GenesisConfig`](https://docs.rs/pallet-treasury/latest/pallet_treasury/struct.GenesisConfig.html). + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index 8dddf3581aef2f1cd459c615465af1988778ed24..5224c1446f45241ad7ffa1111dbcf6f9bcecf7bb 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{benchmarks_instance, account, whitelisted_caller}; use frame_support::traits::OnInitialize; use crate::Module as Treasury; @@ -30,13 +30,13 @@ use crate::Module as Treasury; const SEED: u32 = 0; // Create the pre-requisite information needed to create a treasury `propose_spend`. -fn setup_proposal(u: u32) -> ( +fn setup_proposal, I: Instance>(u: u32) -> ( T::AccountId, - BalanceOf, + BalanceOf, ::Source, ) { let caller = account("caller", u, SEED); - let value: BalanceOf = T::ProposalBondMinimum::get().saturating_mul(100.into()); + let value: BalanceOf = T::ProposalBondMinimum::get().saturating_mul(100.into()); let _ = T::Currency::make_free_balance_be(&caller, value); let beneficiary = account("beneficiary", u, SEED); let beneficiary_lookup = T::Lookup::unlookup(beneficiary); @@ -44,10 +44,10 @@ fn setup_proposal(u: u32) -> ( } // Create the pre-requisite information needed to create a `report_awesome`. -fn setup_awesome(length: u32) -> (T::AccountId, Vec, T::AccountId) { - let caller = account("caller", 0, SEED); +fn setup_awesome, I: Instance>(length: u32) -> (T::AccountId, Vec, T::AccountId) { + let caller = whitelisted_caller(); let value = T::TipReportDepositBase::get() - + T::TipReportDepositPerByte::get() * length.into() + + T::DataDepositPerByte::get() * length.into() + T::Currency::minimum_balance(); let _ = T::Currency::make_free_balance_be(&caller, value); let reason = vec![0; length as usize]; @@ -56,8 +56,8 @@ fn setup_awesome(length: u32) -> (T::AccountId, Vec, T::AccountId) } // Create the pre-requisite information needed to call `tip_new`. -fn setup_tip(r: u32, t: u32) -> - Result<(T::AccountId, Vec, T::AccountId, BalanceOf), &'static str> +fn setup_tip, I: Instance>(r: u32, t: u32) -> + Result<(T::AccountId, Vec, T::AccountId, BalanceOf), &'static str> { let tippers_count = T::Tippers::count(); @@ -77,13 +77,15 @@ fn setup_tip(r: u32, t: u32) -> // Create `t` new tips for the tip proposal with `hash`. // This function automatically makes the tip able to close. -fn create_tips(t: u32, hash: T::Hash, value: BalanceOf) -> Result<(), &'static str> { +fn create_tips, I: Instance>(t: u32, hash: T::Hash, value: BalanceOf) -> + Result<(), &'static str> +{ for i in 0 .. t { let caller = account("member", i, SEED); ensure!(T::Tippers::contains(&caller), "caller is not a tipper"); - Treasury::::tip(RawOrigin::Signed(caller).into(), hash, value)?; + Treasury::::tip(RawOrigin::Signed(caller).into(), hash, value)?; } - Tips::::mutate(hash, |maybe_tip| { + Tips::::mutate(hash, |maybe_tip| { if let Some(open_tip) = maybe_tip { open_tip.closes = Some(T::BlockNumber::zero()); } @@ -92,83 +94,144 @@ fn create_tips(t: u32, hash: T::Hash, value: BalanceOf) -> Result<( } // Create proposals that are approved for use in `on_initialize`. 
-fn create_approved_proposals(n: u32) -> Result<(), &'static str> { +fn create_approved_proposals, I: Instance>(n: u32) -> Result<(), &'static str> { for i in 0 .. n { - let (caller, value, lookup) = setup_proposal::(i); - Treasury::::propose_spend( + let (caller, value, lookup) = setup_proposal::(i); + Treasury::::propose_spend( RawOrigin::Signed(caller).into(), value, lookup )?; - let proposal_id = ProposalCount::get() - 1; - Treasury::::approve_proposal(RawOrigin::Root.into(), proposal_id)?; + let proposal_id = >::get() - 1; + Treasury::::approve_proposal(RawOrigin::Root.into(), proposal_id)?; + } + ensure!(>::get().len() == n as usize, "Not all approved"); + Ok(()) +} + +// Create bounties that are approved for use in `on_initialize`. +fn create_approved_bounties, I: Instance>(n: u32) -> Result<(), &'static str> { + for i in 0 .. n { + let (caller, _curator, _fee, value, reason) = setup_bounty::(i, MAX_BYTES); + Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::::get() - 1; + Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; } - ensure!(Approvals::get().len() == n as usize, "Not all approved"); + ensure!(BountyApprovals::::get().len() == n as usize, "Not all bounty approved"); Ok(()) } +// Create the pre-requisite information needed to create a treasury `propose_bounty`. +fn setup_bounty, I: Instance>(u: u32, d: u32) -> ( + T::AccountId, + T::AccountId, + BalanceOf, + BalanceOf, + Vec, +) { + let caller = account("caller", u, SEED); + let value: BalanceOf = T::Currency::minimum_balance().saturating_mul(100.into()); + let fee = T::Currency::minimum_balance().saturating_mul(2.into()); + let deposit = T::BountyDepositBase::get() + T::DataDepositPerByte::get() * MAX_BYTES.into(); + let _ = T::Currency::make_free_balance_be(&caller, deposit); + let curator = account("curator", u, SEED); + let _ = T::Currency::make_free_balance_be(&curator, fee / 2.into()); + let reason = vec![0; d as usize]; + (caller, curator, fee, value, reason) +} + +fn create_bounty, I: Instance>() -> Result<( + ::Source, + BountyIndex, +), &'static str> { + let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); + let curator_lookup = T::Lookup::unlookup(curator.clone()); + Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::::get() - 1; + Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + Treasury::::on_initialize(T::BlockNumber::zero()); + Treasury::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup.clone(), fee)?; + Treasury::::accept_curator(RawOrigin::Signed(curator).into(), bounty_id)?; + Ok((curator_lookup, bounty_id)) +} + +fn setup_pod_account, I: Instance>() { + let pot_account = Treasury::::account_id(); + let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000.into()); + let _ = T::Currency::make_free_balance_be(&pot_account, value); +} + const MAX_BYTES: u32 = 16384; const MAX_TIPPERS: u32 = 100; -benchmarks! { +benchmarks_instance! { _ { } propose_spend { - let u in 0 .. 1000; - let (caller, value, beneficiary_lookup) = setup_proposal::(u); + let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), value, beneficiary_lookup) reject_proposal { - let u in 0 .. 
1000; - let (caller, value, beneficiary_lookup) = setup_proposal::(u); - Treasury::::propose_spend( + let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); + Treasury::::propose_spend( RawOrigin::Signed(caller).into(), value, beneficiary_lookup )?; - let proposal_id = ProposalCount::get() - 1; + let proposal_id = Treasury::::proposal_count() - 1; }: _(RawOrigin::Root, proposal_id) approve_proposal { - let u in 0 .. 1000; - let (caller, value, beneficiary_lookup) = setup_proposal::(u); - Treasury::::propose_spend( + let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); + Treasury::::propose_spend( RawOrigin::Signed(caller).into(), value, beneficiary_lookup )?; - let proposal_id = ProposalCount::get() - 1; + let proposal_id = Treasury::::proposal_count() - 1; }: _(RawOrigin::Root, proposal_id) report_awesome { let r in 0 .. MAX_BYTES; - let (caller, reason, awesome_person) = setup_awesome::(r); + let (caller, reason, awesome_person) = setup_awesome::(r); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), reason, awesome_person) retract_tip { let r in 0 .. MAX_BYTES; - let (caller, reason, awesome_person) = setup_awesome::(r); - Treasury::::report_awesome( + let (caller, reason, awesome_person) = setup_awesome::(r); + Treasury::::report_awesome( RawOrigin::Signed(caller.clone()).into(), reason.clone(), awesome_person.clone() )?; let reason_hash = T::Hashing::hash(&reason[..]); let hash = T::Hashing::hash_of(&(&reason_hash, &awesome_person)); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), hash) tip_new { let r in 0 .. MAX_BYTES; let t in 1 .. MAX_TIPPERS; - let (caller, reason, beneficiary, value) = setup_tip::(r, t)?; + let (caller, reason, beneficiary, value) = setup_tip::(r, t)?; + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), reason, beneficiary, value) tip { let t in 1 .. MAX_TIPPERS; - let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let (member, reason, beneficiary, value) = setup_tip::(0, t)?; let value = T::Currency::minimum_balance().saturating_mul(100.into()); - Treasury::::tip_new( + Treasury::::tip_new( RawOrigin::Signed(member).into(), reason.clone(), beneficiary.clone(), @@ -176,23 +239,24 @@ benchmarks! { )?; let reason_hash = T::Hashing::hash(&reason[..]); let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); - ensure!(Tips::::contains_key(hash), "tip does not exist"); - create_tips::(t - 1, hash.clone(), value)?; + ensure!(Tips::::contains_key(hash), "tip does not exist"); + create_tips::(t - 1, hash.clone(), value)?; let caller = account("member", t - 1, SEED); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), hash, value) close_tip { let t in 1 .. 
MAX_TIPPERS; // Make sure pot is funded - let pot_account = Treasury::::account_id(); - let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000.into()); - let _ = T::Currency::make_free_balance_be(&pot_account, value); + setup_pod_account::(); // Set up a new tip proposal - let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let (member, reason, beneficiary, value) = setup_tip::(0, t)?; let value = T::Currency::minimum_balance().saturating_mul(100.into()); - Treasury::::tip_new( + Treasury::::tip_new( RawOrigin::Signed(member).into(), reason.clone(), beneficiary.clone(), @@ -202,20 +266,120 @@ benchmarks! { // Create a bunch of tips let reason_hash = T::Hashing::hash(&reason[..]); let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); - ensure!(Tips::::contains_key(hash), "tip does not exist"); - create_tips::(t, hash.clone(), value)?; + ensure!(Tips::::contains_key(hash), "tip does not exist"); + create_tips::(t, hash.clone(), value)?; let caller = account("caller", t, SEED); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), hash) - on_initialize { + propose_bounty { + let d in 0 .. MAX_BYTES; + + let (caller, curator, fee, value, description) = setup_bounty::(0, d); + }: _(RawOrigin::Signed(caller), value, description) + + approve_bounty { + let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); + Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::::get() - 1; + }: _(RawOrigin::Root, bounty_id) + + propose_curator { + setup_pod_account::(); + let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); + let curator_lookup = T::Lookup::unlookup(curator.clone()); + Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::::get() - 1; + Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + Treasury::::on_initialize(T::BlockNumber::zero()); + }: _(RawOrigin::Root, bounty_id, curator_lookup, fee) + + // Worst case when curator is inactive and any sender unassigns the curator. 
+ unassign_curator { + setup_pod_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Treasury::::on_initialize(T::BlockNumber::zero()); + let bounty_id = BountyCount::::get() - 1; + frame_system::Module::::set_block_number(T::BountyUpdatePeriod::get() + 1.into()); + let caller = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), bounty_id) + + accept_curator { + setup_pod_account::(); + let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); + let curator_lookup = T::Lookup::unlookup(curator.clone()); + Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::::get() - 1; + Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + Treasury::::on_initialize(T::BlockNumber::zero()); + Treasury::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup, fee)?; + }: _(RawOrigin::Signed(curator), bounty_id) + + award_bounty { + setup_pod_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Treasury::::on_initialize(T::BlockNumber::zero()); + + let bounty_id = BountyCount::::get() - 1; + let curator = T::Lookup::lookup(curator_lookup)?; + let beneficiary = T::Lookup::unlookup(account("beneficiary", 0, SEED)); + }: _(RawOrigin::Signed(curator), bounty_id, beneficiary) + + claim_bounty { + setup_pod_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Treasury::::on_initialize(T::BlockNumber::zero()); + + let bounty_id = BountyCount::::get() - 1; + let curator = T::Lookup::lookup(curator_lookup)?; + + let beneficiary = T::Lookup::unlookup(account("beneficiary", 0, SEED)); + Treasury::::award_bounty(RawOrigin::Signed(curator.clone()).into(), bounty_id, beneficiary)?; + + frame_system::Module::::set_block_number(T::BountyDepositPayoutDelay::get()); + + }: _(RawOrigin::Signed(curator), bounty_id) + + close_bounty_proposed { + setup_pod_account::(); + let (caller, curator, fee, value, reason) = setup_bounty::(0, 0); + Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::::get() - 1; + }: close_bounty(RawOrigin::Root, bounty_id) + + close_bounty_active { + setup_pod_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Treasury::::on_initialize(T::BlockNumber::zero()); + let bounty_id = BountyCount::::get() - 1; + }: close_bounty(RawOrigin::Root, bounty_id) + + extend_bounty_expiry { + setup_pod_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Treasury::::on_initialize(T::BlockNumber::zero()); + + let bounty_id = BountyCount::::get() - 1; + let curator = T::Lookup::lookup(curator_lookup)?; + }: _(RawOrigin::Signed(curator), bounty_id, Vec::new()) + + on_initialize_proposals { let p in 0 .. 100; - let pot_account = Treasury::::account_id(); - let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000.into()); - let _ = T::Currency::make_free_balance_be(&pot_account, value); - create_approved_proposals::(p)?; + setup_pod_account::(); + create_approved_proposals::(p)?; + }: { + Treasury::::on_initialize(T::BlockNumber::zero()); + } + + on_initialize_bounties { + let b in 0 .. 
100; + setup_pod_account::(); + create_approved_bounties::(b)?; }: { - Treasury::::on_initialize(T::BlockNumber::zero()); + Treasury::::on_initialize(T::BlockNumber::zero()); } } @@ -236,7 +400,18 @@ mod tests { assert_ok!(test_benchmark_tip_new::()); assert_ok!(test_benchmark_tip::()); assert_ok!(test_benchmark_close_tip::()); - assert_ok!(test_benchmark_on_initialize::()); + assert_ok!(test_benchmark_propose_bounty::()); + assert_ok!(test_benchmark_approve_bounty::()); + assert_ok!(test_benchmark_propose_curator::()); + assert_ok!(test_benchmark_unassign_curator::()); + assert_ok!(test_benchmark_accept_curator::()); + assert_ok!(test_benchmark_award_bounty::()); + assert_ok!(test_benchmark_claim_bounty::()); + assert_ok!(test_benchmark_close_bounty_proposed::()); + assert_ok!(test_benchmark_close_bounty_active::()); + assert_ok!(test_benchmark_extend_bounty_expiry::()); + assert_ok!(test_benchmark_on_initialize_proposals::()); + assert_ok!(test_benchmark_on_initialize_bounties::()); }); } } diff --git a/frame/treasury/src/default_weights.rs b/frame/treasury/src/default_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..faf6c1a04573d69fbaa9b9b982bff836ccd81d88 --- /dev/null +++ b/frame/treasury/src/default_weights.rs @@ -0,0 +1,139 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn propose_spend() -> Weight { + (79604000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn reject_proposal() -> Weight { + (61001000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn approve_proposal() -> Weight { + (17835000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn report_awesome(r: u32, ) -> Weight { + (101602000 as Weight) + .saturating_add((2000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + // WARNING! 
Some components were not used: ["r"] + fn retract_tip() -> Weight { + (82970000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn tip_new(r: u32, t: u32, ) -> Weight { + (63995000 as Weight) + .saturating_add((2000 as Weight).saturating_mul(r as Weight)) + .saturating_add((153000 as Weight).saturating_mul(t as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn tip(t: u32, ) -> Weight { + (46765000 as Weight) + .saturating_add((711000 as Weight).saturating_mul(t as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn close_tip(t: u32, ) -> Weight { + (160874000 as Weight) + .saturating_add((379000 as Weight).saturating_mul(t as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn propose_bounty(d: u32, ) -> Weight { + (86198000 as Weight) + .saturating_add((1000 as Weight).saturating_mul(d as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(4 as Weight)) + } + fn approve_bounty() -> Weight { + (23063000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn propose_curator() -> Weight { + (18890000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn unassign_curator() -> Weight { + (66768000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn accept_curator() -> Weight { + (69131000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn award_bounty() -> Weight { + (48184000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn claim_bounty() -> Weight { + (243104000 as Weight) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(5 as Weight)) + } + fn close_bounty_proposed() -> Weight { + (65917000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn close_bounty_active() -> Weight { + (157232000 as Weight) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(4 as Weight)) + } + fn extend_bounty_expiry() -> Weight { + (46216000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn on_initialize_proposals(p: u32, ) -> Weight { + (119765000 as Weight) + .saturating_add((108368000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) + .saturating_add(DbWeight::get().writes(2 as Weight)) + .saturating_add(DbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) + } + fn on_initialize_bounties(b: u32, ) -> Weight { + (112536000 as Weight) + .saturating_add((107132000 as Weight).saturating_mul(b as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) + .saturating_add(DbWeight::get().writes(2 as Weight)) + 
.saturating_add(DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) + } +} diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index af8d4a3cd0c2b0fec725a8e80ca4e81a5054855d..4d76f2f532151955460e42ed1967c16bf85dad52 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -44,6 +44,23 @@ //! countdown period, the median of all declared tips is paid to the reported beneficiary, along //! with any finders fee, in case of a public (and bonded) original report. //! +//! ### Bounty +//! +//! A Bounty Spending is a reward for a specified body of work - or specified set of objectives - that +//! needs to be executed for a predefined Treasury amount to be paid out. A curator is assigned after +//! the bounty is approved and funded by Council, to be delegated +//! with the responsibility of assigning a payout address once the specified set of objectives is completed. +//! +//! After the Council has activated a bounty, it delegates the work that requires expertise to a curator +//! in exchange of a deposit. Once the curator accepts the bounty, they +//! get to close the Active bounty. Closing the Active bounty enacts a delayed payout to the payout +//! address, the curator fee and the return of the curator deposit. The +//! delay allows for intervention through regular democracy. The Council gets to unassign the curator, +//! resulting in a new curator election. The Council also gets to cancel +//! the bounty if deemed necessary before assigning a curator or once the bounty is active or payout +//! is pending, resulting in the slash of the curator's deposit. +//! +//! //! ### Terminology //! //! - **Proposal:** A suggestion to allocate funds from the pot to a beneficiary. @@ -64,6 +81,22 @@ //! - **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip, //! rather than the main beneficiary. //! +//! Bounty: +//! - **Bounty spending proposal:** A proposal to reward a predefined body of work upon completion by +//! the Treasury. +//! - **Proposer:** An account proposing a bounty spending. +//! - **Curator:** An account managing the bounty and assigning a payout address receiving the reward +//! for the completion of work. +//! - **Deposit:** The amount held on deposit for placing a bounty proposal plus the amount held on +//! deposit per byte within the bounty description. +//! - **Curator deposit:** The payment from a candidate willing to curate an approved bounty. The deposit +//! is returned when/if the bounty is completed. +//! - **Bounty value:** The total amount that should be paid to the Payout Address if the bounty is +//! rewarded. +//! - **Payout address:** The account to which the total or part of the bounty is assigned to. +//! - **Payout Delay:** The delay period for which a bounty beneficiary needs to wait before claiming. +//! - **Curator fee:** The reserved upfront payment for a curator for work related to the bounty. +//! //! ## Interface //! //! ### Dispatchable Functions @@ -82,6 +115,19 @@ //! - `tip` - Declare or redeclare an amount to tip for a particular reason. //! - `close_tip` - Close and pay out a tip. //! +//! Bounty protocol: +//! - `propose_bounty` - Propose a specific treasury amount to be earmarked for a predefined set of +//! tasks and stake the required deposit. +//! - `approve_bounty` - Accept a specific treasury amount to be earmarked for a predefined body of work. +//! - `propose_curator` - Assign an account to a bounty as candidate curator. +//! 
- `accept_curator` - Accept a bounty assignment from the Council, setting a curator deposit. +//! - `extend_bounty_expiry` - Extend the expiry block number of the bounty and stay active. +//! - `award_bounty` - Close and pay out the specified amount for the completed work. +//! - `claim_bounty` - Claim a specific bounty amount from the Payout Address. +//! - `unassign_curator` - Unassign an accepted curator from a specific earmark. +//! - `close_bounty` - Cancel the earmark for a specific treasury amount and close the bounty. +//! +//! //! ## GenesisConfig //! //! The Treasury module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). @@ -93,12 +139,13 @@ use serde::{Serialize, Deserialize}; use sp_std::prelude::*; use frame_support::{decl_module, decl_storage, decl_event, ensure, print, decl_error, Parameter}; use frame_support::traits::{ - Currency, Get, Imbalance, OnUnbalanced, ExistenceRequirement::KeepAlive, + Currency, Get, Imbalance, OnUnbalanced, ExistenceRequirement::{KeepAlive, AllowDeath}, ReservableCurrency, WithdrawReason }; -use sp_runtime::{Permill, ModuleId, Percent, RuntimeDebug, traits::{ +use sp_runtime::{Permill, ModuleId, Percent, RuntimeDebug, DispatchResult, traits::{ Zero, StaticLookup, AccountIdConversion, Saturating, Hash, BadOrigin }}; +use frame_support::dispatch::DispatchResultWithPostInfo; use frame_support::weights::{Weight, DispatchClass}; use frame_support::traits::{Contains, ContainsLengthBound, EnsureOrigin}; use codec::{Encode, Decode}; @@ -106,36 +153,39 @@ use frame_system::{self as system, ensure_signed}; mod tests; mod benchmarking; +mod default_weights; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type PositiveImbalanceOf = <::Currency as Currency<::AccountId>>::PositiveImbalance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = + <>::Currency as Currency<::AccountId>>::Balance; +type PositiveImbalanceOf = + <>::Currency as Currency<::AccountId>>::PositiveImbalance; +type NegativeImbalanceOf = + <>::Currency as Currency<::AccountId>>::NegativeImbalance; pub trait WeightInfo { - fn propose_spend(u: u32, ) -> Weight; - fn reject_proposal(u: u32, ) -> Weight; - fn approve_proposal(u: u32, ) -> Weight; + fn propose_spend() -> Weight; + fn reject_proposal() -> Weight; + fn approve_proposal() -> Weight; fn report_awesome(r: u32, ) -> Weight; - fn retract_tip(r: u32, ) -> Weight; + fn retract_tip() -> Weight; fn tip_new(r: u32, t: u32, ) -> Weight; fn tip(t: u32, ) -> Weight; fn close_tip(t: u32, ) -> Weight; - fn on_initialize(p: u32, ) -> Weight; + fn propose_bounty(r: u32, ) -> Weight; + fn approve_bounty() -> Weight; + fn propose_curator() -> Weight; + fn unassign_curator() -> Weight; + fn accept_curator() -> Weight; + fn award_bounty() -> Weight; + fn claim_bounty() -> Weight; + fn close_bounty_proposed() -> Weight; + fn close_bounty_active() -> Weight; + fn extend_bounty_expiry() -> Weight; + fn on_initialize_proposals(p: u32, ) -> Weight; + fn on_initialize_bounties(b: u32, ) -> Weight; } -impl WeightInfo for () { - fn propose_spend(_u: u32, ) -> Weight { 1_000_000_000 } - fn reject_proposal(_u: u32, ) -> Weight { 1_000_000_000 } - fn approve_proposal(_u: u32, ) -> Weight { 1_000_000_000 } - fn report_awesome(_r: u32, ) -> Weight { 1_000_000_000 } - fn retract_tip(_r: u32, ) -> Weight { 1_000_000_000 } - fn tip_new(_r: u32, _t: u32, ) -> Weight { 1_000_000_000 } - fn tip(_t: u32, ) -> Weight { 1_000_000_000 } - fn close_tip(_t: u32, ) -> Weight { 1_000_000_000 } 
- fn on_initialize(_p: u32, ) -> Weight { 1_000_000_000 } -} - -pub trait Trait: frame_system::Trait { +pub trait Trait: frame_system::Trait { /// The treasury's module id, used for deriving its sovereign account ID. type ModuleId: Get; @@ -160,23 +210,23 @@ pub trait Trait: frame_system::Trait { type TipFindersFee: Get; /// The amount held on deposit for placing a tip report. - type TipReportDepositBase: Get>; + type TipReportDepositBase: Get>; - /// The amount held on deposit per byte within the tip report reason. - type TipReportDepositPerByte: Get>; + /// The amount held on deposit per byte within the tip report reason or bounty description. + type DataDepositPerByte: Get>; /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; - /// Handler for the unbalanced decrease when slashing for a rejected proposal. - type ProposalRejection: OnUnbalanced>; + /// Handler for the unbalanced decrease when slashing for a rejected proposal or bounty. + type OnSlash: OnUnbalanced>; /// Fraction of a proposal's value that should be bonded in order to place the proposal. /// An accepted proposal gets these back. A rejected proposal does not. type ProposalBond: Get; /// Minimum amount of funds that should be placed in a deposit for making a proposal. - type ProposalBondMinimum: Get>; + type ProposalBondMinimum: Get>; /// Period between successive spends. type SpendPeriod: Get; @@ -184,8 +234,26 @@ pub trait Trait: frame_system::Trait { /// Percentage of spare funds (if any) that are burnt per spend period. type Burn: Get; + /// The amount held on deposit for placing a bounty proposal. + type BountyDepositBase: Get>; + + /// The delay period for which a bounty beneficiary need to wait before claim the payout. + type BountyDepositPayoutDelay: Get; + + /// Bounty duration in blocks. + type BountyUpdatePeriod: Get; + + /// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator. + type BountyCuratorDeposit: Get; + + /// Minimum value for a bounty. + type BountyValueMinimum: Get>; + + /// Maximum acceptable reason length. + type MaximumReasonLength: Get; + /// Handler for the unbalanced decrease when treasury funds are burned. - type BurnDestination: OnUnbalanced>; + type BurnDestination: OnUnbalanced>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; @@ -235,15 +303,67 @@ pub struct OpenTip< finders_fee: bool, } +/// An index of a bounty. Just a `u32`. +pub type BountyIndex = u32; + +/// A bounty proposal. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct Bounty { + /// The account proposing it. + proposer: AccountId, + /// The (total) amount that should be paid if the bounty is rewarded. + value: Balance, + /// The curator fee. Included in value. + fee: Balance, + /// The deposit of curator. + curator_deposit: Balance, + /// The amount held on deposit (reserved) for making this proposal. + bond: Balance, + /// The status of this bounty. + status: BountyStatus, +} + +/// The status of a bounty proposal. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub enum BountyStatus { + /// The bounty is proposed and waiting for approval. + Proposed, + /// The bounty is approved and waiting to become active at next spend period. + Approved, + /// The bounty is funded and waiting for curator assignment. + Funded, + /// A curator has been proposed by the `ApproveOrigin`. Waiting for acceptance from the curator. 
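The hard-coded `impl WeightInfo for ()` above is dropped in favour of the new `default_weights` module, and the trait gains one entry per bounty extrinsic. As a rough, hedged sketch only (the names `BountyWeightInfo` and `ConstantWeights` are invented here, and only a handful of the new methods are spelled out), a test runtime that does not care about accurate weights could still satisfy such a trait with constants:

// Sketch: a constant-weight stand-in for the extended WeightInfo trait.
// `Weight` is a u64 in FRAME; the methods mirror a subset of the trait in
// this diff, renamed so it is clearly not the pallet's own trait.
type Weight = u64;

trait BountyWeightInfo {
    fn propose_bounty(description_len: u32) -> Weight;
    fn approve_bounty() -> Weight;
    fn accept_curator() -> Weight;
    fn claim_bounty() -> Weight;
}

struct ConstantWeights;

impl BountyWeightInfo for ConstantWeights {
    // Flat per-call weight plus a per-byte charge for the description;
    // real values would come from benchmarking (see the new default_weights module).
    fn propose_bounty(description_len: u32) -> Weight {
        1_000_000_000 + 4_000 * description_len as Weight
    }
    fn approve_bounty() -> Weight { 1_000_000_000 }
    fn accept_curator() -> Weight { 1_000_000_000 }
    fn claim_bounty() -> Weight { 1_000_000_000 }
}

fn main() {
    println!("propose_bounty(32 bytes) -> {}", ConstantWeights::propose_bounty(32));
}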
+ CuratorProposed { + /// The assigned curator of this bounty. + curator: AccountId, + }, + /// The bounty is active and waiting to be awarded. + Active { + /// The curator of this bounty. + curator: AccountId, + /// An update from the curator is due by this block, else they are considered inactive. + update_due: BlockNumber, + }, + /// The bounty is awarded and waiting to released after a delay. + PendingPayout { + /// The curator of this bounty. + curator: AccountId, + /// The beneficiary of the bounty. + beneficiary: AccountId, + /// When the bounty can be claimed. + unlock_at: BlockNumber, + }, +} + decl_storage! { - trait Store for Module as Treasury { + trait Store for Module, I: Instance=DefaultInstance> as Treasury { /// Number of proposals that have been made. ProposalCount get(fn proposal_count): ProposalIndex; /// Proposals that have been made. Proposals get(fn proposals): map hasher(twox_64_concat) ProposalIndex - => Option>>; + => Option>>; /// Proposal indices that have been approved but not yet awarded. Approvals get(fn approvals): Vec; @@ -253,17 +373,31 @@ decl_storage! { /// guaranteed to be a secure hash. pub Tips get(fn tips): map hasher(twox_64_concat) T::Hash - => Option, T::BlockNumber, T::Hash>>; + => Option, T::BlockNumber, T::Hash>>; /// Simple preimage lookup from the reason's hash to the original data. Again, has an /// insecure enumerable hash since the key is guaranteed to be the result of a secure hash. pub Reasons get(fn reasons): map hasher(identity) T::Hash => Option>; + + /// Number of bounty proposals that have been made. + pub BountyCount get(fn bounty_count): BountyIndex; + + /// Bounties that have been made. + pub Bounties get(fn bounties): + map hasher(twox_64_concat) BountyIndex + => Option, T::BlockNumber>>; + + /// The description of each bounty. + pub BountyDescriptions get(fn bounty_descriptions): map hasher(twox_64_concat) BountyIndex => Option>; + + /// Bounty indices that have been approved but not yet funded. + pub BountyApprovals get(fn bounty_approvals): Vec; } add_extra_genesis { build(|_config| { // Create Treasury account let _ = T::Currency::make_free_balance_be( - &>::account_id(), + &>::account_id(), T::Currency::minimum_balance(), ); }); @@ -271,44 +405,59 @@ decl_storage! { } decl_event!( - pub enum Event + pub enum Event where - Balance = BalanceOf, + Balance = BalanceOf, ::AccountId, ::Hash, { - /// New proposal. [proposal_index] + /// New proposal. \[proposal_index\] Proposed(ProposalIndex), - /// We have ended a spend period and will now allocate funds. [budget_remaining] + /// We have ended a spend period and will now allocate funds. \[budget_remaining\] Spending(Balance), - /// Some funds have been allocated. [proposal_index, award, beneficiary] + /// Some funds have been allocated. \[proposal_index, award, beneficiary\] Awarded(ProposalIndex, Balance, AccountId), - /// A proposal was rejected; funds were slashed. [proposal_index, slashed] + /// A proposal was rejected; funds were slashed. \[proposal_index, slashed\] Rejected(ProposalIndex, Balance), - /// Some of our funds have been burnt. [burn] + /// Some of our funds have been burnt. \[burn\] Burnt(Balance), - /// Spending has finished; this is the amount that rolls over until next spend. [budget_remaining] + /// Spending has finished; this is the amount that rolls over until next spend. + /// \[budget_remaining\] Rollover(Balance), - /// Some funds have been deposited. [deposit] + /// Some funds have been deposited. 
\[deposit\] Deposit(Balance), - /// A new tip suggestion has been opened. [tip_hash] + /// A new tip suggestion has been opened. \[tip_hash\] NewTip(Hash), - /// A tip suggestion has reached threshold and is closing. [tip_hash] + /// A tip suggestion has reached threshold and is closing. \[tip_hash\] TipClosing(Hash), - /// A tip suggestion has been closed. [tip_hash, who, payout] + /// A tip suggestion has been closed. \[tip_hash, who, payout\] TipClosed(Hash, AccountId, Balance), - /// A tip suggestion has been retracted. [tip_hash] + /// A tip suggestion has been retracted. \[tip_hash\] TipRetracted(Hash), + /// New bounty proposal. [index] + BountyProposed(BountyIndex), + /// A bounty proposal was rejected; funds were slashed. [index, bond] + BountyRejected(BountyIndex, Balance), + /// A bounty proposal is funded and became active. [index] + BountyBecameActive(BountyIndex), + /// A bounty is awarded to a beneficiary. [index, beneficiary] + BountyAwarded(BountyIndex, AccountId), + /// A bounty is claimed by beneficiary. [index, payout, beneficiary] + BountyClaimed(BountyIndex, Balance, AccountId), + /// A bounty is cancelled. [index] + BountyCanceled(BountyIndex), + /// A bounty expiry is extended. [index] + BountyExtended(BountyIndex), } ); decl_error! { /// Error for the treasury module. - pub enum Error for Module { + pub enum Error for Module, I: Instance> { /// Proposer's balance is too low. InsufficientProposersBalance, - /// No proposal at that index. - InvalidProposalIndex, + /// No proposal or bounty at that index. + InvalidIndex, /// The reason given is just too big. ReasonTooBig, /// The tip was already found/started. @@ -321,17 +470,31 @@ decl_error! { StillOpen, /// The tip cannot be claimed/closed because it's still in the countdown period. Premature, + /// The bounty status is unexpected. + UnexpectedStatus, + /// Require bounty curator. + RequireCurator, + /// Invalid bounty value. + InvalidValue, + /// Invalid bounty fee. + InvalidFee, + /// A bounty payout is pending. + /// To cancel the bounty, you must unassign and slash the curator. + PendingPayout, } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module, I: Instance=DefaultInstance> + for enum Call + where origin: T::Origin + { /// Fraction of a proposal's value that should be bonded in order to place the proposal. /// An accepted proposal gets these back. A rejected proposal does not. const ProposalBond: Permill = T::ProposalBond::get(); /// Minimum amount of funds that should be placed in a deposit for making a proposal. - const ProposalBondMinimum: BalanceOf = T::ProposalBondMinimum::get(); + const ProposalBondMinimum: BalanceOf = T::ProposalBondMinimum::get(); /// Period between successive spends. const SpendPeriod: T::BlockNumber = T::SpendPeriod::get(); @@ -346,15 +509,29 @@ decl_module! { const TipFindersFee: Percent = T::TipFindersFee::get(); /// The amount held on deposit for placing a tip report. - const TipReportDepositBase: BalanceOf = T::TipReportDepositBase::get(); + const TipReportDepositBase: BalanceOf = T::TipReportDepositBase::get(); - /// The amount held on deposit per byte within the tip report reason. - const TipReportDepositPerByte: BalanceOf = T::TipReportDepositPerByte::get(); + /// The amount held on deposit per byte within the tip report reason or bounty description. + const DataDepositPerByte: BalanceOf = T::DataDepositPerByte::get(); /// The treasury's module id, used for deriving its sovereign account ID. 
const ModuleId: ModuleId = T::ModuleId::get(); - type Error = Error; + /// The amount held on deposit for placing a bounty proposal. + const BountyDepositBase: BalanceOf = T::BountyDepositBase::get(); + + /// The delay period for which a bounty beneficiary need to wait before claim the payout. + const BountyDepositPayoutDelay: T::BlockNumber = T::BountyDepositPayoutDelay::get(); + + /// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator. + const BountyCuratorDeposit: Permill = T::BountyCuratorDeposit::get(); + + const BountyValueMinimum: BalanceOf = T::BountyValueMinimum::get(); + + /// Maximum acceptable reason length. + const MaximumReasonLength: u32 = T::MaximumReasonLength::get(); + + type Error = Error; fn deposit_event() = default; @@ -367,10 +544,10 @@ decl_module! { /// - DbReads: `ProposalCount`, `origin account` /// - DbWrites: `ProposalCount`, `Proposals`, `origin account` /// # - #[weight = 120_000_000 + T::DbWeight::get().reads_writes(1, 2)] + #[weight = T::WeightInfo::propose_spend()] fn propose_spend( origin, - #[compact] value: BalanceOf, + #[compact] value: BalanceOf, beneficiary: ::Source ) { let proposer = ensure_signed(origin)?; @@ -378,11 +555,11 @@ decl_module! { let bond = Self::calculate_bond(value); T::Currency::reserve(&proposer, bond) - .map_err(|_| Error::::InsufficientProposersBalance)?; + .map_err(|_| Error::::InsufficientProposersBalance)?; let c = Self::proposal_count(); - ProposalCount::put(c + 1); - >::insert(c, Proposal { proposer, value, beneficiary, bond }); + >::put(c + 1); + >::insert(c, Proposal { proposer, value, beneficiary, bond }); Self::deposit_event(RawEvent::Proposed(c)); } @@ -396,16 +573,16 @@ decl_module! { /// - DbReads: `Proposals`, `rejected proposer account` /// - DbWrites: `Proposals`, `rejected proposer account` /// # - #[weight = (130_000_000 + T::DbWeight::get().reads_writes(2, 2), DispatchClass::Operational)] + #[weight = (T::WeightInfo::reject_proposal(), DispatchClass::Operational)] fn reject_proposal(origin, #[compact] proposal_id: ProposalIndex) { T::RejectOrigin::ensure_origin(origin)?; - let proposal = >::take(&proposal_id).ok_or(Error::::InvalidProposalIndex)?; + let proposal = >::take(&proposal_id).ok_or(Error::::InvalidIndex)?; let value = proposal.bond; let imbalance = T::Currency::slash_reserved(&proposal.proposer, value).0; - T::ProposalRejection::on_unbalanced(imbalance); + T::OnSlash::on_unbalanced(imbalance); - Self::deposit_event(Event::::Rejected(proposal_id, value)); + Self::deposit_event(Event::::Rejected(proposal_id, value)); } /// Approve a proposal. At a later time, the proposal will be allocated to the beneficiary @@ -418,12 +595,12 @@ decl_module! { /// - DbReads: `Proposals`, `Approvals` /// - DbWrite: `Approvals` /// # - #[weight = (34_000_000 + T::DbWeight::get().reads_writes(2, 1), DispatchClass::Operational)] + #[weight = (T::WeightInfo::approve_proposal(), DispatchClass::Operational)] fn approve_proposal(origin, #[compact] proposal_id: ProposalIndex) { T::ApproveOrigin::ensure_origin(origin)?; - ensure!(>::contains_key(proposal_id), Error::::InvalidProposalIndex); - Approvals::mutate(|v| v.push(proposal_id)); + ensure!(>::contains_key(proposal_id), Error::::InvalidIndex); + Approvals::::append(proposal_id); } /// Report something `reason` that deserves a tip and claim any eventual the finder's fee. @@ -431,7 +608,7 @@ decl_module! { /// The dispatch origin for this call must be _Signed_. 
/// /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as - /// `TipReportDepositPerByte` for each byte in `reason`. + /// `DataDepositPerByte` for each byte in `reason`. /// /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be /// a UTF-8-encoded URL. @@ -442,26 +619,25 @@ decl_module! { /// # /// - Complexity: `O(R)` where `R` length of `reason`. /// - encoding and hashing of 'reason' - /// - DbReads: `Reasons`, `Tips`, `who account data` - /// - DbWrites: `Tips`, `who account data` + /// - DbReads: `Reasons`, `Tips` + /// - DbWrites: `Reasons`, `Tips` /// # - #[weight = 140_000_000 + 4_000 * reason.len() as Weight + T::DbWeight::get().reads_writes(3, 2)] + #[weight = T::WeightInfo::report_awesome(reason.len() as u32)] fn report_awesome(origin, reason: Vec, who: T::AccountId) { let finder = ensure_signed(origin)?; - const MAX_SENSIBLE_REASON_LENGTH: usize = 16384; - ensure!(reason.len() <= MAX_SENSIBLE_REASON_LENGTH, Error::::ReasonTooBig); + ensure!(reason.len() <= T::MaximumReasonLength::get() as usize, Error::::ReasonTooBig); let reason_hash = T::Hashing::hash(&reason[..]); - ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); + ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); let hash = T::Hashing::hash_of(&(&reason_hash, &who)); - ensure!(!Tips::::contains_key(&hash), Error::::AlreadyKnown); + ensure!(!Tips::::contains_key(&hash), Error::::AlreadyKnown); let deposit = T::TipReportDepositBase::get() - + T::TipReportDepositPerByte::get() * (reason.len() as u32).into(); + + T::DataDepositPerByte::get() * (reason.len() as u32).into(); T::Currency::reserve(&finder, deposit)?; - Reasons::::insert(&reason_hash, &reason); + Reasons::::insert(&reason_hash, &reason); let tip = OpenTip { reason: reason_hash, who, @@ -471,7 +647,7 @@ decl_module! { tips: vec![], finders_fee: true }; - Tips::::insert(&hash, tip); + Tips::::insert(&hash, tip); Self::deposit_event(RawEvent::NewTip(hash)); } @@ -494,14 +670,14 @@ decl_module! { /// - DbReads: `Tips`, `origin account` /// - DbWrites: `Reasons`, `Tips`, `origin account` /// # - #[weight = 120_000_000 + T::DbWeight::get().reads_writes(1, 2)] + #[weight = T::WeightInfo::retract_tip()] fn retract_tip(origin, hash: T::Hash) { let who = ensure_signed(origin)?; - let tip = Tips::::get(&hash).ok_or(Error::::UnknownTip)?; - ensure!(tip.finder == who, Error::::NotFinder); + let tip = Tips::::get(&hash).ok_or(Error::::UnknownTip)?; + ensure!(tip.finder == who, Error::::NotFinder); - Reasons::::remove(&tip.reason); - Tips::::remove(&hash); + Reasons::::remove(&tip.reason); + Tips::::remove(&hash); if !tip.deposit.is_zero() { let _ = T::Currency::unreserve(&who, tip.deposit); } @@ -530,18 +706,15 @@ decl_module! 
{ /// - DbReads: `Tippers`, `Reasons` /// - DbWrites: `Reasons`, `Tips` /// # - #[weight = 110_000_000 - + 4_000 * reason.len() as Weight - + 480_000 * T::Tippers::max_len() as Weight - + T::DbWeight::get().reads_writes(2, 2)] - fn tip_new(origin, reason: Vec, who: T::AccountId, tip_value: BalanceOf) { + #[weight = T::WeightInfo::tip_new(reason.len() as u32, T::Tippers::max_len() as u32)] + fn tip_new(origin, reason: Vec, who: T::AccountId, #[compact] tip_value: BalanceOf) { let tipper = ensure_signed(origin)?; ensure!(T::Tippers::contains(&tipper), BadOrigin); let reason_hash = T::Hashing::hash(&reason[..]); - ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); + ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); let hash = T::Hashing::hash_of(&(&reason_hash, &who)); - Reasons::::insert(&reason_hash, &reason); + Reasons::::insert(&reason_hash, &reason); Self::deposit_event(RawEvent::NewTip(hash.clone())); let tips = vec![(tipper.clone(), tip_value)]; let tip = OpenTip { @@ -553,7 +726,7 @@ decl_module! { tips, finders_fee: false, }; - Tips::::insert(&hash, tip); + Tips::::insert(&hash, tip); } /// Declare a tip value for an already-open tip. @@ -581,17 +754,16 @@ decl_module! { /// - DbReads: `Tippers`, `Tips` /// - DbWrites: `Tips` /// # - #[weight = 68_000_000 + 2_000_000 * T::Tippers::max_len() as Weight - + T::DbWeight::get().reads_writes(2, 1)] - fn tip(origin, hash: T::Hash, tip_value: BalanceOf) { + #[weight = T::WeightInfo::tip(T::Tippers::max_len() as u32)] + fn tip(origin, hash: T::Hash, #[compact] tip_value: BalanceOf) { let tipper = ensure_signed(origin)?; ensure!(T::Tippers::contains(&tipper), BadOrigin); - let mut tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; + let mut tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; if Self::insert_tip_and_check_closing(&mut tip, tipper, tip_value) { Self::deposit_event(RawEvent::TipClosing(hash.clone())); } - Tips::::insert(&hash, tip); + Tips::::insert(&hash, tip); } /// Close and payout a tip. @@ -611,20 +783,384 @@ decl_module! { /// - DbReads: `Tips`, `Tippers`, `tip finder` /// - DbWrites: `Reasons`, `Tips`, `Tippers`, `tip finder` /// # - #[weight = 220_000_000 + 1_100_000 * T::Tippers::max_len() as Weight - + T::DbWeight::get().reads_writes(3, 3)] + #[weight = T::WeightInfo::close_tip(T::Tippers::max_len() as u32)] fn close_tip(origin, hash: T::Hash) { ensure_signed(origin)?; - let tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; - let n = tip.closes.as_ref().ok_or(Error::::StillOpen)?; - ensure!(system::Module::::block_number() >= *n, Error::::Premature); + let tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; + let n = tip.closes.as_ref().ok_or(Error::::StillOpen)?; + ensure!(system::Module::::block_number() >= *n, Error::::Premature); // closed. - Reasons::::remove(&tip.reason); - Tips::::remove(hash); + Reasons::::remove(&tip.reason); + Tips::::remove(hash); Self::payout_tip(hash, tip); } + /// Propose a new bounty. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as + /// `DataDepositPerByte` for each byte in `reason`. It will be unreserved upon approval, + /// or slashed when rejected. + /// + /// - `curator`: The curator account whom will manage this bounty. + /// - `fee`: The curator fee. + /// - `value`: The total payment amount of this bounty, curator fee included. + /// - `description`: The description of this bounty. 
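Note that the deposit actually reserved by `create_bounty` further down is `BountyDepositBase` plus `DataDepositPerByte` per description byte, even though this doc comment still names `TipReportDepositBase`. As a quick standalone check (a sketch, not pallet code), the arithmetic below uses the mock-runtime constants from tests.rs (base 80, one unit per byte) and reproduces the deposits asserted in `propose_bounty_works` and `close_bounty_works`:

// Proposer bond as computed in create_bounty, with the tests.rs constants.
fn bounty_bond(bounty_deposit_base: u64, data_deposit_per_byte: u64, description: &[u8]) -> u64 {
    bounty_deposit_base + data_deposit_per_byte * description.len() as u64
}

fn main() {
    // b"1234567890" is the description used in propose_bounty_works: 80 + 10 = 90.
    assert_eq!(bounty_bond(80, 1, b"1234567890"), 90);
    // b"12345" is used in most other bounty tests: 80 + 5 = 85.
    assert_eq!(bounty_bond(80, 1, b"12345"), 85);
    println!("bond checks hold");
}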
+ #[weight = T::WeightInfo::propose_bounty(description.len() as u32)] + fn propose_bounty( + origin, + #[compact] value: BalanceOf, + description: Vec, + ) { + let proposer = ensure_signed(origin)?; + Self::create_bounty(proposer, description, value)?; + } + + /// Approve a bounty proposal. At a later time, the bounty will be funded and become active + /// and the original deposit will be returned. + /// + /// May only be called from `T::ApproveOrigin`. + /// + /// # + /// - O(1). + /// - Limited storage reads. + /// - One DB change. + /// # + #[weight = T::WeightInfo::approve_bounty()] + fn approve_bounty(origin, #[compact] bounty_id: ProposalIndex) { + T::ApproveOrigin::ensure_origin(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + ensure!(bounty.status == BountyStatus::Proposed, Error::::UnexpectedStatus); + + bounty.status = BountyStatus::Approved; + + BountyApprovals::::append(bounty_id); + + Ok(()) + })?; + } + + /// Assign a curator to a funded bounty. + /// + /// May only be called from `T::ApproveOrigin`. + /// + /// # + /// - O(1). + /// - Limited storage reads. + /// - One DB change. + /// # + #[weight = T::WeightInfo::propose_curator()] + fn propose_curator( + origin, + #[compact] bounty_id: ProposalIndex, + curator: ::Source, + #[compact] fee: BalanceOf, + ) { + T::ApproveOrigin::ensure_origin(origin)?; + + let curator = T::Lookup::lookup(curator)?; + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + match bounty.status { + BountyStatus::Funded | BountyStatus::CuratorProposed { .. } => {}, + _ => return Err(Error::::UnexpectedStatus.into()), + }; + + ensure!(fee < bounty.value, Error::::InvalidFee); + + bounty.status = BountyStatus::CuratorProposed { curator }; + bounty.fee = fee; + + Ok(()) + })?; + } + + /// Unassign curator from a bounty. + /// + /// This function can only be called by the `RejectOrigin` a signed origin. + /// + /// If this function is called by the `RejectOrigin`, we assume that the curator is malicious + /// or inactive. As a result, we will slash the curator when possible. + /// + /// If the origin is the curator, we take this as a sign they are unable to do their job and + /// they willingly give up. We could slash them, but for now we allow them to recover their + /// deposit and exit without issue. (We may want to change this if it is abused.) + /// + /// Finally, the origin can be anyone if and only if the curator is "inactive". This allows + /// anyone in the community to call out that a curator is not doing their due diligence, and + /// we should pick a new curator. In this case the curator should also be slashed. + /// + /// # + /// - O(1). + /// - Limited storage reads. + /// - One DB change. 
+ /// # + #[weight = T::WeightInfo::unassign_curator()] + fn unassign_curator( + origin, + #[compact] bounty_id: ProposalIndex, + ) { + let maybe_sender = ensure_signed(origin.clone()) + .map(Some) + .or_else(|_| T::RejectOrigin::ensure_origin(origin).map(|_| None))?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + + let slash_curator = |curator: &T::AccountId, curator_deposit: &mut BalanceOf| { + let imbalance = T::Currency::slash_reserved(curator, *curator_deposit).0; + T::OnSlash::on_unbalanced(imbalance); + *curator_deposit = Zero::zero(); + }; + + match bounty.status { + BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => { + // No curator to unassign at this point. + return Err(Error::::UnexpectedStatus.into()) + } + BountyStatus::CuratorProposed { ref curator } => { + // A curator has been proposed, but not accepted yet. + // Either `RejectOrigin` or the proposed curator can unassign the curator. + ensure!(maybe_sender.map_or(true, |sender| sender == *curator), BadOrigin); + }, + BountyStatus::Active { ref curator, ref update_due } => { + // The bounty is active. + match maybe_sender { + // If the `RejectOrigin` is calling this function, slash the curator. + None => { + slash_curator(curator, &mut bounty.curator_deposit); + // Continue to change bounty status below... + }, + Some(sender) => { + // If the sender is not the curator, and the curator is inactive, + // slash the curator. + if sender != *curator { + let block_number = system::Module::::block_number(); + if *update_due < block_number { + slash_curator(curator, &mut bounty.curator_deposit); + // Continue to change bounty status below... + } else { + // Curator has more time to give an update. + return Err(Error::::Premature.into()) + } + } else { + // Else this is the curator, willingly giving up their role. + // Give back their deposit. + let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); + // Continue to change bounty status below... + } + }, + } + }, + BountyStatus::PendingPayout { ref curator, .. } => { + // The bounty is pending payout, so only council can unassign a curator. + // By doing so, they are claiming the curator is acting maliciously, so + // we slash the curator. + ensure!(maybe_sender.is_none(), BadOrigin); + slash_curator(curator, &mut bounty.curator_deposit); + // Continue to change bounty status below... + } + }; + + bounty.status = BountyStatus::Funded; + Ok(()) + })?; + } + + /// Accept the curator role for a bounty. + /// A deposit will be reserved from curator and refund upon successful payout. + /// + /// May only be called from the curator. + /// + /// # + /// - O(1). + /// - Limited storage reads. + /// - One DB change. 
+ /// # + #[weight = T::WeightInfo::accept_curator()] + fn accept_curator(origin, #[compact] bounty_id: ProposalIndex) { + let signer = ensure_signed(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + + match bounty.status { + BountyStatus::CuratorProposed { ref curator } => { + ensure!(signer == *curator, Error::::RequireCurator); + + let deposit = T::BountyCuratorDeposit::get() * bounty.fee; + T::Currency::reserve(curator, deposit)?; + bounty.curator_deposit = deposit; + + let update_due = system::Module::::block_number() + T::BountyUpdatePeriod::get(); + bounty.status = BountyStatus::Active { curator: curator.clone(), update_due }; + + Ok(()) + }, + _ => Err(Error::::UnexpectedStatus.into()), + } + })?; + } + + /// Award bounty to a beneficiary account. The beneficiary will be able to claim the funds after a delay. + /// + /// The dispatch origin for this call must be the curator of this bounty. + /// + /// - `bounty_id`: Bounty ID to award. + /// - `beneficiary`: The beneficiary account whom will receive the payout. + #[weight = T::WeightInfo::award_bounty()] + fn award_bounty(origin, #[compact] bounty_id: ProposalIndex, beneficiary: ::Source) { + let signer = ensure_signed(origin)?; + let beneficiary = T::Lookup::lookup(beneficiary)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + match &bounty.status { + BountyStatus::Active { + curator, + .. + } => { + ensure!(signer == *curator, Error::::RequireCurator); + }, + _ => return Err(Error::::UnexpectedStatus.into()), + } + bounty.status = BountyStatus::PendingPayout { + curator: signer, + beneficiary: beneficiary.clone(), + unlock_at: system::Module::::block_number() + T::BountyDepositPayoutDelay::get(), + }; + + Ok(()) + })?; + + Self::deposit_event(Event::::BountyAwarded(bounty_id, beneficiary)); + } + + /// Claim the payout from an awarded bounty after payout delay. + /// + /// The dispatch origin for this call must be the beneficiary of this bounty. + /// + /// - `bounty_id`: Bounty ID to claim. + #[weight = T::WeightInfo::claim_bounty()] + fn claim_bounty(origin, #[compact] bounty_id: BountyIndex) { + let _ = ensure_signed(origin)?; // anyone can trigger claim + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let bounty = maybe_bounty.take().ok_or(Error::::InvalidIndex)?; + if let BountyStatus::PendingPayout { curator, beneficiary, unlock_at } = bounty.status { + ensure!(system::Module::::block_number() >= unlock_at, Error::::Premature); + let bounty_account = Self::bounty_account_id(bounty_id); + let balance = T::Currency::free_balance(&bounty_account); + let fee = bounty.fee.min(balance); // just to be safe + let payout = balance.saturating_sub(fee); + let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); + let _ = T::Currency::transfer(&bounty_account, &curator, fee, AllowDeath); // should not fail + let _ = T::Currency::transfer(&bounty_account, &beneficiary, payout, AllowDeath); // should not fail + *maybe_bounty = None; + + BountyDescriptions::::remove(bounty_id); + + Self::deposit_event(Event::::BountyClaimed(bounty_id, payout, beneficiary)); + Ok(()) + } else { + Err(Error::::UnexpectedStatus.into()) + } + })?; + } + + /// Cancel a proposed or active bounty. All the funds will be sent to treasury and + /// the curator deposit will be unreserved if possible. 
+ /// + /// Only `T::RejectOrigin` is able to cancel a bounty. + /// + /// - `bounty_id`: Bounty ID to cancel. + #[weight = T::WeightInfo::close_bounty_proposed().max(T::WeightInfo::close_bounty_active())] + fn close_bounty(origin, #[compact] bounty_id: BountyIndex) -> DispatchResultWithPostInfo { + T::RejectOrigin::ensure_origin(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResultWithPostInfo { + let bounty = maybe_bounty.as_ref().ok_or(Error::::InvalidIndex)?; + + match &bounty.status { + BountyStatus::Proposed => { + // The reject origin would like to cancel a proposed bounty. + BountyDescriptions::::remove(bounty_id); + let value = bounty.bond; + let imbalance = T::Currency::slash_reserved(&bounty.proposer, value).0; + T::OnSlash::on_unbalanced(imbalance); + *maybe_bounty = None; + + Self::deposit_event(Event::::BountyRejected(bounty_id, value)); + // Return early, nothing else to do. + return Ok(Some(T::WeightInfo::close_bounty_proposed()).into()) + }, + BountyStatus::Approved => { + // For weight reasons, we don't allow a council to cancel in this phase. + // We ask for them to wait until it is funded before they can cancel. + return Err(Error::::UnexpectedStatus.into()) + }, + BountyStatus::Funded | + BountyStatus::CuratorProposed { .. } => { + // Nothing extra to do besides the removal of the bounty below. + }, + BountyStatus::Active { curator, .. } => { + // Cancelled by council, refund deposit of the working curator. + let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); + // Then execute removal of the bounty below. + }, + BountyStatus::PendingPayout { .. } => { + // Bounty is already pending payout. If council wants to cancel + // this bounty, it should mean the curator was acting maliciously. + // So the council should first unassign the curator, slashing their + // deposit. + return Err(Error::::PendingPayout.into()) + } + } + + let bounty_account = Self::bounty_account_id(bounty_id); + + BountyDescriptions::::remove(bounty_id); + + let balance = T::Currency::free_balance(&bounty_account); + let _ = T::Currency::transfer(&bounty_account, &Self::account_id(), balance, AllowDeath); // should not fail + *maybe_bounty = None; + + Self::deposit_event(Event::::BountyCanceled(bounty_id)); + Ok(Some(T::WeightInfo::close_bounty_active()).into()) + }) + } + + /// Extend the expiry time of an active bounty. + /// + /// The dispatch origin for this call must be the curator of this bounty. + /// + /// - `bounty_id`: Bounty ID to extend. + /// - `remark`: additional information. + #[weight = T::WeightInfo::extend_bounty_expiry()] + fn extend_bounty_expiry(origin, #[compact] bounty_id: BountyIndex, _remark: Vec) { + let signer = ensure_signed(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + + match bounty.status { + BountyStatus::Active { ref curator, ref mut update_due } => { + ensure!(*curator == signer, Error::::RequireCurator); + *update_due = (system::Module::::block_number() + T::BountyUpdatePeriod::get()).max(*update_due); + }, + _ => return Err(Error::::UnexpectedStatus.into()), + } + + Ok(()) + })?; + + Self::deposit_event(Event::::BountyExtended(bounty_id)); + } + /// # /// - Complexity: `O(A)` where `A` is the number of approvals /// - Db reads and writes: `Approvals`, `pot account data` @@ -635,10 +1171,7 @@ decl_module! { fn on_initialize(n: T::BlockNumber) -> Weight { // Check to see if we should spend some funds! 
if (n % T::SpendPeriod::get()).is_zero() { - let approvals_len = Self::spend_funds(); - - 270_000_000 * approvals_len - + T::DbWeight::get().reads_writes(2 + approvals_len * 3, 2 + approvals_len * 3) + Self::spend_funds() } else { 0 } @@ -646,7 +1179,7 @@ decl_module! { } } -impl Module { +impl, I: Instance> Module { // Add public immutables and private mutables. /// The account ID of the treasury pot. @@ -657,8 +1190,15 @@ impl Module { T::ModuleId::get().into_account() } + /// The account ID of a bounty account + pub fn bounty_account_id(id: BountyIndex) -> T::AccountId { + // only use two byte prefix to support 16 byte account id (used by test) + // "modl" ++ "py/trsry" ++ "bt" is 14 bytes, and two bytes remaining for bounty index + T::ModuleId::get().into_sub_account(("bt", id)) + } + /// The needed bond for a proposal whose spend is `value`. - fn calculate_bond(value: BalanceOf) -> BalanceOf { + fn calculate_bond(value: BalanceOf) -> BalanceOf { T::ProposalBondMinimum::get().max(T::ProposalBond::get() * value) } @@ -667,9 +1207,9 @@ impl Module { /// /// `O(T)` and one storage access. fn insert_tip_and_check_closing( - tip: &mut OpenTip, T::BlockNumber, T::Hash>, + tip: &mut OpenTip, T::BlockNumber, T::Hash>, tipper: T::AccountId, - tip_value: BalanceOf, + tip_value: BalanceOf, ) -> bool { match tip.tips.binary_search_by_key(&&tipper, |x| &x.0) { Ok(pos) => tip.tips[pos] = (tipper, tip_value), @@ -686,7 +1226,7 @@ impl Module { } /// Remove any non-members of `Tippers` from a `tips` vector. `O(T)`. - fn retain_active_tips(tips: &mut Vec<(T::AccountId, BalanceOf)>) { + fn retain_active_tips(tips: &mut Vec<(T::AccountId, BalanceOf)>) { let members = T::Tippers::sorted_members(); let mut members_iter = members.iter(); let mut member = members_iter.next(); @@ -710,7 +1250,7 @@ impl Module { /// /// Up to three balance operations. /// Plus `O(T)` (`T` is Tippers length). - fn payout_tip(hash: T::Hash, tip: OpenTip, T::BlockNumber, T::Hash>) { + fn payout_tip(hash: T::Hash, tip: OpenTip, T::BlockNumber, T::Hash>) { let mut tips = tip.tips; Self::retain_active_tips(&mut tips); tips.sort_by_key(|i| i.1); @@ -736,20 +1276,23 @@ impl Module { } /// Spend some money! returns number of approvals before spend. - fn spend_funds() -> u64 { + fn spend_funds() -> Weight { + let mut total_weight: Weight = Zero::zero(); + let mut budget_remaining = Self::pot(); Self::deposit_event(RawEvent::Spending(budget_remaining)); + let account_id = Self::account_id(); let mut missed_any = false; - let mut imbalance = >::zero(); - let prior_approvals_len = Approvals::mutate(|v| { - let prior_approvals_len = v.len() as u64; + let mut imbalance = >::zero(); + let proposals_len = Approvals::::mutate(|v| { + let proposals_approvals_len = v.len() as u32; v.retain(|&index| { // Should always be true, but shouldn't panic if false or we're screwed. if let Some(p) = Self::proposals(index) { if p.value <= budget_remaining { budget_remaining -= p.value; - >::remove(index); + >::remove(index); // return their deposit. let _ = T::Currency::unreserve(&p.proposer, p.bond); @@ -767,9 +1310,44 @@ impl Module { false } }); - prior_approvals_len + proposals_approvals_len + }); + + total_weight += T::WeightInfo::on_initialize_proposals(proposals_len); + + let bounties_len = BountyApprovals::::mutate(|v| { + let bounties_approval_len = v.len() as u32; + v.retain(|&index| { + Bounties::::mutate(index, |bounty| { + // Should always be true, but shouldn't panic if false or we're screwed. 
+ if let Some(bounty) = bounty { + if bounty.value <= budget_remaining { + budget_remaining -= bounty.value; + + bounty.status = BountyStatus::Funded; + + // return their deposit. + let _ = T::Currency::unreserve(&bounty.proposer, bounty.bond); + + // fund the bounty account + imbalance.subsume(T::Currency::deposit_creating(&Self::bounty_account_id(index), bounty.value)); + + Self::deposit_event(RawEvent::BountyBecameActive(index)); + false + } else { + missed_any = true; + true + } + } else { + false + } + }) + }); + bounties_approval_len }); + total_weight += T::WeightInfo::on_initialize_bounties(bounties_len); + if !missed_any { // burn some proportion of the remaining budget if we run a surplus. let burn = (T::Burn::get() * budget_remaining).min(budget_remaining); @@ -786,7 +1364,7 @@ impl Module { // Thus we can't spend more than account free balance minus ED; // Thus account is kept alive; qed; if let Err(problem) = T::Currency::settle( - &Self::account_id(), + &account_id, imbalance, WithdrawReason::Transfer.into(), KeepAlive @@ -798,17 +1376,47 @@ impl Module { Self::deposit_event(RawEvent::Rollover(budget_remaining)); - prior_approvals_len + total_weight } /// Return the amount of money in the pot. // The existential deposit is not part of the pot so treasury account never gets deleted. - fn pot() -> BalanceOf { + fn pot() -> BalanceOf { T::Currency::free_balance(&Self::account_id()) // Must never be less than 0 but better be safe. .saturating_sub(T::Currency::minimum_balance()) } + fn create_bounty( + proposer: T::AccountId, + description: Vec, + value: BalanceOf, + ) -> DispatchResult { + ensure!(description.len() <= T::MaximumReasonLength::get() as usize, Error::::ReasonTooBig); + ensure!(value >= T::BountyValueMinimum::get(), Error::::InvalidValue); + + let index = Self::bounty_count(); + + // reserve deposit for new bounty + let bond = T::BountyDepositBase::get() + + T::DataDepositPerByte::get() * (description.len() as u32).into(); + T::Currency::reserve(&proposer, bond) + .map_err(|_| Error::::InsufficientProposersBalance)?; + + BountyCount::::put(index + 1); + + let bounty = Bounty { + proposer, value, fee: 0.into(), curator_deposit: 0.into(), bond, status: BountyStatus::Proposed, + }; + + Bounties::::insert(index, &bounty); + BountyDescriptions::::insert(index, description); + + Self::deposit_event(RawEvent::BountyProposed(index)); + + Ok(()) + } + pub fn migrate_retract_tip_for_tip_new() { /// An open tipping "motion". Retains all details of a tip including information on the finder /// and the members who have voted. @@ -837,9 +1445,9 @@ impl Module { for (hash, old_tip) in StorageKeyIterator::< T::Hash, - OldOpenTip, T::BlockNumber, T::Hash>, + OldOpenTip, T::BlockNumber, T::Hash>, Twox64Concat, - >::new(b"Treasury", b"Tips").drain() + >::new(I::PREFIX.as_bytes(), b"Tips").drain() { let (finder, deposit, finders_fee) = match old_tip.finder { Some((finder, deposit)) => (finder, deposit, true), @@ -854,13 +1462,13 @@ impl Module { tips: old_tip.tips, finders_fee }; - Tips::::insert(hash, new_tip) + Tips::::insert(hash, new_tip) } } } -impl OnUnbalanced> for Module { - fn on_nonzero_unbalanced(amount: NegativeImbalanceOf) { +impl, I: Instance> OnUnbalanced> for Module { + fn on_nonzero_unbalanced(amount: NegativeImbalanceOf) { let numeric_amount = amount.peek(); // Must resolve into existing but better to be safe. 
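Before the test changes, a compact summary of the lifecycle wired up above: the sketch below is a simplified model of the `BountyStatus` machine (unit variants only, happy path only), not the pallet's own types, with the extrinsic that drives each transition noted alongside.

// Simplified model of the bounty state machine added in this file. The real
// BountyStatus variants carry curator / beneficiary / block-number payloads;
// here they are unit variants and only the happy path is encoded.
#[derive(Debug, Clone, Copy, PartialEq)]
enum BountyStatus {
    Proposed,        // propose_bounty (proposer bonds a deposit)
    Approved,        // approve_bounty (ApproveOrigin)
    Funded,          // spend_funds moves treasury money into the bounty account
    CuratorProposed, // propose_curator (ApproveOrigin)
    Active,          // accept_curator (curator bonds a deposit)
    PendingPayout,   // award_bounty (curator names a beneficiary)
}

fn advance(status: BountyStatus) -> Option<(BountyStatus, &'static str)> {
    use BountyStatus::*;
    match status {
        Proposed => Some((Approved, "approve_bounty")),
        Approved => Some((Funded, "on_initialize / spend_funds")),
        Funded => Some((CuratorProposed, "propose_curator")),
        CuratorProposed => Some((Active, "accept_curator")),
        Active => Some((PendingPayout, "award_bounty")),
        // claim_bounty removes the bounty after BountyDepositPayoutDelay.
        PendingPayout => None,
    }
}

fn main() {
    let mut status = BountyStatus::Proposed;
    while let Some((next, call)) = advance(status) {
        println!("{:?} --{}--> {:?}", status, call, next);
        status = next;
    }
    println!("{:?} --claim_bounty--> (bounty removed)", status);
}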
diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index 59a41a263cc9fed430cf05e86015602a67e460e0..d8cebbcd693ef7785f66fb00fbc04685c6f6ab34 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -67,7 +67,7 @@ impl frame_system::Trait for Test { type Call = (); type Hash = H256; type Hashing = BlakeTwo256; - type AccountId = u64; + type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account type Lookup = IdentityLookup; type Header = Header; type Event = Event; @@ -80,7 +80,7 @@ impl frame_system::Trait for Test { type AvailableBlockRatio = AvailableBlockRatio; type MaximumBlockLength = MaximumBlockLength; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -90,6 +90,7 @@ parameter_types! { pub const ExistentialDeposit: u64 = 1; } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u64; type Event = Event; type DustRemoval = (); @@ -98,17 +99,17 @@ impl pallet_balances::Trait for Test { type WeightInfo = (); } thread_local! { - static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); + static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); } pub struct TenToFourteen; -impl Contains for TenToFourteen { - fn sorted_members() -> Vec { +impl Contains for TenToFourteen { + fn sorted_members() -> Vec { TEN_TO_FOURTEEN.with(|v| { v.borrow().clone() }) } #[cfg(feature = "runtime-benchmarks")] - fn add(new: &u64) { + fn add(new: &u128) { TEN_TO_FOURTEEN.with(|v| { let mut members = v.borrow_mut(); members.push(*new); @@ -130,25 +131,37 @@ parameter_types! { pub const TipCountdown: u64 = 1; pub const TipFindersFee: Percent = Percent::from_percent(20); pub const TipReportDepositBase: u64 = 1; - pub const TipReportDepositPerByte: u64 = 1; + pub const DataDepositPerByte: u64 = 1; + pub const BountyDepositBase: u64 = 80; + pub const BountyDepositPayoutDelay: u64 = 3; pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); + pub const BountyUpdatePeriod: u32 = 20; + pub const MaximumReasonLength: u32 = 16384; + pub const BountyCuratorDeposit: Permill = Permill::from_percent(50); + pub const BountyValueMinimum: u64 = 1; } impl Trait for Test { type ModuleId = TreasuryModuleId; type Currency = pallet_balances::Module; - type ApproveOrigin = frame_system::EnsureRoot; - type RejectOrigin = frame_system::EnsureRoot; + type ApproveOrigin = frame_system::EnsureRoot; + type RejectOrigin = frame_system::EnsureRoot; type Tippers = TenToFourteen; type TipCountdown = TipCountdown; type TipFindersFee = TipFindersFee; type TipReportDepositBase = TipReportDepositBase; - type TipReportDepositPerByte = TipReportDepositPerByte; + type DataDepositPerByte = DataDepositPerByte; type Event = Event; - type ProposalRejection = (); + type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; + type BountyDepositBase = BountyDepositBase; + type BountyDepositPayoutDelay = BountyDepositPayoutDelay; + type BountyUpdatePeriod = BountyUpdatePeriod; + type BountyCuratorDeposit = BountyCuratorDeposit; + type BountyValueMinimum = BountyValueMinimum; + type MaximumReasonLength = MaximumReasonLength; type BurnDestination = (); // Just gets burned. 
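For reference while reading the tests that follow: with the bounty parameters fixed just above (BountyDepositBase = 80, DataDepositPerByte = 1, BountyCuratorDeposit = 50%, BountyUpdatePeriod = 20, BountyDepositPayoutDelay = 3), the standalone arithmetic below, a sketch rather than pallet code, reproduces the curator deposit of 2, `update_due: 22` and `unlock_at: 5` asserted in `assign_curator_works` and `award_and_claim_bounty_works`.

// Curator economics implied by the mock-runtime constants above.
fn main() {
    let curator_deposit_percent = 50u64; // BountyCuratorDeposit = Permill::from_percent(50)
    let bounty_update_period = 20u64;    // BountyUpdatePeriod
    let payout_delay = 3u64;             // BountyDepositPayoutDelay

    let fee = 4u64;          // fee passed to propose_curator in assign_curator_works
    let accept_block = 2u64; // block at which accept_curator is called
    let award_block = 2u64;  // block at which award_bounty is called

    let curator_deposit = curator_deposit_percent * fee / 100;
    let update_due = accept_block + bounty_update_period;
    let unlock_at = award_block + payout_delay;

    assert_eq!(curator_deposit, 2); // curator_deposit: 2 in assign_curator_works
    assert_eq!(update_due, 22);     // update_due: 22 in the Active status
    assert_eq!(unlock_at, 5);       // unlock_at: 5 in award_and_claim_bounty_works
    println!("deposit={}, update_due={}, unlock_at={}", curator_deposit, update_due, unlock_at);
}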
type WeightInfo = (); } @@ -162,10 +175,19 @@ pub fn new_test_ext() -> sp_io::TestExternalities { // Total issuance will be 200 with treasury account initialized at ED. balances: vec![(0, 100), (1, 98), (2, 1)], }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); + GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); t.into() } +fn last_event() -> RawEvent { + System::events().into_iter().map(|r| r.event) + .filter_map(|e| { + if let Event::treasury(inner) = e { Some(inner) } else { None } + }) + .last() + .unwrap() +} + #[test] fn genesis_config_works() { new_test_ext().execute_with(|| { @@ -175,7 +197,7 @@ fn genesis_config_works() { } fn tip_hash() -> H256 { - BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 3u64)) + BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 3u128)) } #[test] @@ -185,7 +207,7 @@ fn tip_new_cannot_be_used_twice() { assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); assert_noop!( Treasury::tip_new(Origin::signed(11), b"awesome.dot".to_vec(), 3, 10), - Error::::AlreadyKnown + Error::::AlreadyKnown ); }); } @@ -201,7 +223,7 @@ fn report_awesome_and_tip_works() { // other reports don't count. assert_noop!( Treasury::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3), - Error::::AlreadyKnown + Error::::AlreadyKnown ); let h = tip_hash(); @@ -224,7 +246,7 @@ fn report_awesome_from_beneficiary_and_tip_works() { assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 0)); assert_eq!(Balances::reserved_balance(0), 12); assert_eq!(Balances::free_balance(0), 88); - let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u64)); + let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u128)); assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); @@ -247,50 +269,26 @@ fn close_tip_works() { let h = tip_hash(); - assert_eq!( - System::events().into_iter().map(|r| r.event) - .filter_map(|e| { - if let Event::treasury(inner) = e { Some(inner) } else { None } - }) - .last() - .unwrap(), - RawEvent::NewTip(h), - ); + assert_eq!(last_event(), RawEvent::NewTip(h)); assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::StillOpen); + assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::StillOpen); assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_eq!( - System::events().into_iter().map(|r| r.event) - .filter_map(|e| { - if let Event::treasury(inner) = e { Some(inner) } else { None } - }) - .last() - .unwrap(), - RawEvent::TipClosing(h), - ); + assert_eq!(last_event(), RawEvent::TipClosing(h)); - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::Premature); + assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::Premature); System::set_block_number(2); assert_noop!(Treasury::close_tip(Origin::none(), h.into()), BadOrigin); assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); assert_eq!(Balances::free_balance(3), 10); - assert_eq!( - System::events().into_iter().map(|r| r.event) - .filter_map(|e| { - if let Event::treasury(inner) = e { Some(inner) } else { None } - }) - .last() - .unwrap(), - RawEvent::TipClosed(h, 3, 10), - ); + assert_eq!(last_event(), RawEvent::TipClosed(h, 3, 10)); - 
assert_noop!(Treasury::close_tip(Origin::signed(100), h.into()), Error::::UnknownTip); + assert_noop!(Treasury::close_tip(Origin::signed(100), h.into()), Error::::UnknownTip); }); } @@ -304,10 +302,10 @@ fn retract_tip_works() { assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::retract_tip(Origin::signed(10), h.clone()), Error::::NotFinder); + assert_noop!(Treasury::retract_tip(Origin::signed(10), h.clone()), Error::::NotFinder); assert_ok!(Treasury::retract_tip(Origin::signed(0), h.clone())); System::set_block_number(2); - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); + assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); // with tip new Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -315,10 +313,10 @@ fn retract_tip_works() { let h = tip_hash(); assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::retract_tip(Origin::signed(0), h.clone()), Error::::NotFinder); + assert_noop!(Treasury::retract_tip(Origin::signed(0), h.clone()), Error::::NotFinder); assert_ok!(Treasury::retract_tip(Origin::signed(10), h.clone())); System::set_block_number(2); - assert_noop!(Treasury::close_tip(Origin::signed(10), h.into()), Error::::UnknownTip); + assert_noop!(Treasury::close_tip(Origin::signed(10), h.into()), Error::::UnknownTip); }); } @@ -387,7 +385,7 @@ fn spend_proposal_fails_when_proposer_poor() { new_test_ext().execute_with(|| { assert_noop!( Treasury::propose_spend(Origin::signed(2), 100, 3), - Error::::InsufficientProposersBalance, + Error::::InsufficientProposersBalance, ); }); } @@ -440,21 +438,21 @@ fn reject_already_rejected_spend_proposal_fails() { assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); - assert_noop!(Treasury::reject_proposal(Origin::root(), 0), Error::::InvalidProposalIndex); + assert_noop!(Treasury::reject_proposal(Origin::root(), 0), Error::::InvalidIndex); }); } #[test] fn reject_non_existent_spend_proposal_fails() { new_test_ext().execute_with(|| { - assert_noop!(Treasury::reject_proposal(Origin::root(), 0), Error::::InvalidProposalIndex); + assert_noop!(Treasury::reject_proposal(Origin::root(), 0), Error::::InvalidIndex); }); } #[test] fn accept_non_existent_spend_proposal_fails() { new_test_ext().execute_with(|| { - assert_noop!(Treasury::approve_proposal(Origin::root(), 0), Error::::InvalidProposalIndex); + assert_noop!(Treasury::approve_proposal(Origin::root(), 0), Error::::InvalidIndex); }); } @@ -465,7 +463,7 @@ fn accept_already_rejected_spend_proposal_fails() { assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); - assert_noop!(Treasury::approve_proposal(Origin::root(), 0), Error::::InvalidProposalIndex); + assert_noop!(Treasury::approve_proposal(Origin::root(), 0), Error::::InvalidIndex); }); } @@ -561,6 +559,502 @@ fn inexistent_account_works() { }); } +#[test] +fn propose_bounty_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_ok!(Treasury::propose_bounty(Origin::signed(0), 10, b"1234567890".to_vec())); + + assert_eq!(last_event(), 
RawEvent::BountyProposed(0)); + + let deposit: u64 = 85 + 5; + assert_eq!(Balances::reserved_balance(0), deposit); + assert_eq!(Balances::free_balance(0), 100 - deposit); + + assert_eq!(Treasury::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 0, + curator_deposit: 0, + value: 10, + bond: deposit, + status: BountyStatus::Proposed, + }); + + assert_eq!(Treasury::bounty_descriptions(0).unwrap(), b"1234567890".to_vec()); + + assert_eq!(Treasury::bounty_count(), 1); + }); +} + +#[test] +fn propose_bounty_validation_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_noop!( + Treasury::propose_bounty(Origin::signed(1), 0, [0; 17_000].to_vec()), + Error::::ReasonTooBig + ); + + assert_noop!( + Treasury::propose_bounty(Origin::signed(1), 10, b"12345678901234567890".to_vec()), + Error::::InsufficientProposersBalance + ); + + assert_noop!( + Treasury::propose_bounty(Origin::signed(1), 0, b"12345678901234567890".to_vec()), + Error::::InvalidValue + ); + }); +} + +#[test] +fn close_bounty_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_noop!(Treasury::close_bounty(Origin::root(), 0), Error::::InvalidIndex); + + assert_ok!(Treasury::propose_bounty(Origin::signed(0), 10, b"12345".to_vec())); + + assert_ok!(Treasury::close_bounty(Origin::root(), 0)); + + let deposit: u64 = 80 + 5; + + assert_eq!(last_event(), RawEvent::BountyRejected(0, deposit)); + + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 100 - deposit); + + assert_eq!(Treasury::bounties(0), None); + assert!(!Bounties::::contains_key(0)); + assert_eq!(Treasury::bounty_descriptions(0), None); + }); +} + +#[test] +fn approve_bounty_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_noop!(Treasury::approve_bounty(Origin::root(), 0), Error::::InvalidIndex); + + assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); + + let deposit: u64 = 80 + 5; + + assert_eq!(Treasury::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 0, + value: 50, + curator_deposit: 0, + bond: deposit, + status: BountyStatus::Approved, + }); + assert_eq!(Treasury::bounty_approvals(), vec![0]); + + assert_noop!(Treasury::close_bounty(Origin::root(), 0), Error::::UnexpectedStatus); + + // deposit not returned yet + assert_eq!(Balances::reserved_balance(0), deposit); + assert_eq!(Balances::free_balance(0), 100 - deposit); + + >::on_initialize(2); + + // return deposit + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 100); + + assert_eq!(Treasury::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 0, + curator_deposit: 0, + value: 50, + bond: deposit, + status: BountyStatus::Funded, + }); + assert_eq!(Treasury::pot(), 100 - 50 - 25); // burn 25 + assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 50); + }); +} + +#[test] +fn assign_curator_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_noop!(Treasury::propose_curator(Origin::root(), 0, 4, 4), Error::::InvalidIndex); + + assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + 
assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_noop!(Treasury::propose_curator(Origin::root(), 0, 4, 50), Error::::InvalidFee); + + assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); + + assert_eq!(Treasury::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 4, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::CuratorProposed { + curator: 4, + }, + }); + + assert_noop!(Treasury::accept_curator(Origin::signed(1), 0), Error::::RequireCurator); + assert_noop!(Treasury::accept_curator(Origin::signed(4), 0), pallet_balances::Error::::InsufficientBalance); + + Balances::make_free_balance_be(&4, 10); + + assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); + + assert_eq!(Treasury::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 4, + curator_deposit: 2, + value: 50, + bond: 85, + status: BountyStatus::Active { + curator: 4, + update_due: 22, + }, + }); + + assert_eq!(Balances::free_balance(&4), 8); + assert_eq!(Balances::reserved_balance(&4), 2); + }); +} + +#[test] +fn unassign_curator_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); + + assert_noop!(Treasury::unassign_curator(Origin::signed(1), 0), BadOrigin); + + assert_ok!(Treasury::unassign_curator(Origin::signed(4), 0)); + + assert_eq!(Treasury::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 4, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + }); + + assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); + + Balances::make_free_balance_be(&4, 10); + + assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); + + assert_ok!(Treasury::unassign_curator(Origin::root(), 0)); + + assert_eq!(Treasury::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 4, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + }); + + assert_eq!(Balances::free_balance(&4), 8); + assert_eq!(Balances::reserved_balance(&4), 0); // slashed 2 + }); +} + +#[test] +fn award_and_claim_bounty_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + Balances::make_free_balance_be(&4, 10); + assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); + assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); + + assert_eq!(Balances::free_balance(4), 8); // inital 10 - 2 deposit + + assert_noop!(Treasury::award_bounty(Origin::signed(1), 0, 3), Error::::RequireCurator); + + assert_ok!(Treasury::award_bounty(Origin::signed(4), 0, 3)); + + assert_eq!(Treasury::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 4, + curator_deposit: 2, + value: 50, + bond: 85, + status: BountyStatus::PendingPayout { + curator: 4, + beneficiary: 3, + unlock_at: 5 + }, + }); + + assert_noop!(Treasury::claim_bounty(Origin::signed(1), 0), Error::::Premature); + + System::set_block_number(5); + >::on_initialize(5); + + 
assert_ok!(Balances::transfer(Origin::signed(0), Treasury::bounty_account_id(0), 10)); + + assert_ok!(Treasury::claim_bounty(Origin::signed(1), 0)); + + assert_eq!(last_event(), RawEvent::BountyClaimed(0, 56, 3)); + + assert_eq!(Balances::free_balance(4), 14); // initial 10 + fee 4 + assert_eq!(Balances::free_balance(3), 56); + assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 0); + + assert_eq!(Treasury::bounties(0), None); + assert_eq!(Treasury::bounty_descriptions(0), None); + }); +} + +#[test] +fn claim_handles_high_fee() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + Balances::make_free_balance_be(&4, 30); + assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + <Treasury as OnInitialize<u64>>::on_initialize(2); + + assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 49)); + assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); + + assert_ok!(Treasury::award_bounty(Origin::signed(4), 0, 3)); + + System::set_block_number(5); + <Treasury as OnInitialize<u64>>::on_initialize(5); + + // make fee > balance + let _ = Balances::slash(&Treasury::bounty_account_id(0), 10); + + assert_ok!(Treasury::claim_bounty(Origin::signed(1), 0)); + + assert_eq!(last_event(), RawEvent::BountyClaimed(0, 0, 3)); + + assert_eq!(Balances::free_balance(4), 70); // 30 + 50 - 10 + assert_eq!(Balances::free_balance(3), 0); + assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 0); + + assert_eq!(Treasury::bounties(0), None); + assert_eq!(Treasury::bounty_descriptions(0), None); + }); +} + +#[test] +fn cancel_and_refund() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + <Treasury as OnInitialize<u64>>::on_initialize(2); + + assert_ok!(Balances::transfer(Origin::signed(0), Treasury::bounty_account_id(0), 10)); + + assert_eq!(Treasury::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 0, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + }); + + assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 60); + + assert_noop!(Treasury::close_bounty(Origin::signed(0), 0), BadOrigin); + + assert_ok!(Treasury::close_bounty(Origin::root(), 0)); + + assert_eq!(Treasury::pot(), 85); // - 25 + 10 + }); +} + +#[test] +fn award_and_cancel() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + <Treasury as OnInitialize<u64>>::on_initialize(2); + + assert_ok!(Treasury::propose_curator(Origin::root(), 0, 0, 10)); + assert_ok!(Treasury::accept_curator(Origin::signed(0), 0)); + + assert_eq!(Balances::free_balance(0), 95); + assert_eq!(Balances::reserved_balance(0), 5); + + assert_ok!(Treasury::award_bounty(Origin::signed(0), 0, 3)); + + // Cannot close bounty directly when payout is happening... + assert_noop!(Treasury::close_bounty(Origin::root(), 0), Error::<Test>::PendingPayout); + + // Instead unassign the curator to slash them and then close.
+ assert_ok!(Treasury::unassign_curator(Origin::root(), 0)); + assert_ok!(Treasury::close_bounty(Origin::root(), 0)); + + assert_eq!(last_event(), RawEvent::BountyCanceled(0)); + + assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 0); + // Slashed. + assert_eq!(Balances::free_balance(0), 95); + assert_eq!(Balances::reserved_balance(0), 0); + + assert_eq!(Treasury::bounties(0), None); + assert_eq!(Treasury::bounty_descriptions(0), None); + }); +} + +#[test] +fn expire_and_unassign() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + <Treasury as OnInitialize<u64>>::on_initialize(2); + + assert_ok!(Treasury::propose_curator(Origin::root(), 0, 1, 10)); + assert_ok!(Treasury::accept_curator(Origin::signed(1), 0)); + + assert_eq!(Balances::free_balance(1), 93); + assert_eq!(Balances::reserved_balance(1), 5); + + System::set_block_number(22); + <Treasury as OnInitialize<u64>>::on_initialize(22); + + assert_noop!(Treasury::unassign_curator(Origin::signed(0), 0), Error::<Test>::Premature); + + System::set_block_number(23); + <Treasury as OnInitialize<u64>>::on_initialize(23); + + assert_ok!(Treasury::unassign_curator(Origin::signed(0), 0)); + + assert_eq!(Treasury::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 10, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + }); + + assert_eq!(Balances::free_balance(1), 93); + assert_eq!(Balances::reserved_balance(1), 0); // slashed + + }); +} + +#[test] +fn extend_expiry() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + Balances::make_free_balance_be(&4, 10); + assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); + + assert_noop!(Treasury::extend_bounty_expiry(Origin::signed(1), 0, Vec::new()), Error::<Test>::UnexpectedStatus); + + System::set_block_number(2); + <Treasury as OnInitialize<u64>>::on_initialize(2); + + assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 10)); + assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); + + assert_eq!(Balances::free_balance(4), 5); + assert_eq!(Balances::reserved_balance(4), 5); + + System::set_block_number(10); + <Treasury as OnInitialize<u64>>::on_initialize(10); + + assert_noop!(Treasury::extend_bounty_expiry(Origin::signed(0), 0, Vec::new()), Error::<Test>::RequireCurator); + assert_ok!(Treasury::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); + + assert_eq!(Treasury::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 10, + curator_deposit: 5, + value: 50, + bond: 85, + status: BountyStatus::Active { curator: 4, update_due: 30 }, + }); + + assert_ok!(Treasury::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); + + assert_eq!(Treasury::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 10, + curator_deposit: 5, + value: 50, + bond: 85, + status: BountyStatus::Active { curator: 4, update_due: 30 }, // still the same + }); + + System::set_block_number(25); + <Treasury as OnInitialize<u64>>::on_initialize(25); + + assert_noop!(Treasury::unassign_curator(Origin::signed(0), 0), Error::<Test>::Premature); + assert_ok!(Treasury::unassign_curator(Origin::signed(4), 0)); + + assert_eq!(Balances::free_balance(4), 10); // not slashed + assert_eq!(Balances::reserved_balance(4), 0); + }); +} + #[test] fn test_last_reward_migration() { use sp_storage::Storage; @@ -591,7 +1085,7 @@ fn test_last_reward_migration() { let
reason1 = BlakeTwo256::hash(b"reason1"); let hash1 = BlakeTwo256::hash_of(&(reason1, 10u64)); - let old_tip_finder = OldOpenTip:: { + let old_tip_finder = OldOpenTip:: { reason: reason1, who: 10, finder: Some((20, 30)), @@ -602,7 +1096,7 @@ fn test_last_reward_migration() { let reason2 = BlakeTwo256::hash(b"reason2"); let hash2 = BlakeTwo256::hash_of(&(reason2, 20u64)); - let old_tip_no_finder = OldOpenTip:: { + let old_tip_no_finder = OldOpenTip:: { reason: reason2, who: 20, finder: None, diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index 6a1525dcfb475cfcfef92ab3f46f93275dbaa44c..098730aa30083195eb91e92a133650bce7344319 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-utility" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME utilities pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,18 +15,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0", path = "../balances" } [features] default = ["std"] diff --git a/frame/utility/README.md b/frame/utility/README.md new file mode 100644 index 0000000000000000000000000000000000000000..396396929118086dd12ba150d85eb240e3284de9 --- /dev/null +++ b/frame/utility/README.md @@ -0,0 +1,38 @@ +# Utility Module +A stateless module with helpers for dispatch management which does no re-authentication. 
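For orientation only (this illustration is not part of the patch itself), a batch dispatch as exercised in the pallet's own tests looks roughly like the sketch below, assuming a mock runtime that exposes this pallet as `Utility` and pallet_balances as `Balances`:
// Two balance transfers dispatched as one signed extrinsic. If a call at index `i`
// fails, `Event::BatchInterrupted(i, error)` is emitted and the calls before it keep
// their effects; the batch is not atomic.
let calls = vec![
	Call::Balances(BalancesCall::transfer(2, 5)),
	Call::Balances(BalancesCall::transfer(3, 5)),
];
assert_ok!(Utility::batch(Origin::signed(1), calls));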
+ +- [`utility::Trait`](https://docs.rs/pallet-utility/latest/pallet_utility/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-utility/latest/pallet_utility/enum.Call.html) + +## Overview + +This module contains two basic pieces of functionality: +- Batch dispatch: A stateless operation, allowing any origin to execute multiple calls in a + single dispatch. This can be useful to amalgamate proposals, combining `set_code` with + corresponding `set_storage`s, for efficient multiple payouts with just a single signature + verify, or in combination with one of the other two dispatch functionality. +- Pseudonymal dispatch: A stateless operation, allowing a signed origin to execute a call from + an alternative signed origin. Each account has 2 * 2**16 possible "pseudonyms" (alternative + account IDs) and these can be stacked. This can be useful as a key management tool, where you + need multiple distinct accounts (e.g. as controllers for many staking accounts), but where + it's perfectly fine to have each of them controlled by the same underlying keypair. + Derivative accounts are, for the purposes of proxy filtering considered exactly the same as + the origin and are thus hampered with the origin's filters. + +Since proxy filters are respected in all dispatches of this module, it should never need to be +filtered by any proxy. + +## Interface + +### Dispatchable Functions + +#### For batch dispatch +* `batch` - Dispatch multiple calls from the sender's origin. + +#### For pseudonymal dispatch +* `as_derivative` - Dispatch a call from a derivative signed origin. + +[`Call`]: ./enum.Call.html +[`Trait`]: ./trait.Trait.html + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs index 155a279807afc2bc185f94cba6aae6639be5b448..8ca0e216f2887a2265f568bbe907be19bfc91021 100644 --- a/frame/utility/src/benchmarking.rs +++ b/frame/utility/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use frame_system::{RawOrigin, EventRecord}; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; const SEED: u32 = 0; @@ -43,7 +43,7 @@ benchmarks! { let call = frame_system::Call::remark(vec![]).into(); calls.push(call); } - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), calls) verify { assert_last_event::<T>(Event::BatchCompleted.into()) @@ -53,6 +53,9 @@ benchmarks! { let u in 0 .. 1000; let caller = account("caller", u, SEED); let call = Box::new(frame_system::Call::remark(vec![]).into()); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::<T>::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), u as u16, call) } diff --git a/frame/utility/src/default_weights.rs b/frame/utility/src/default_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..d023dbddd4f8087b51820fb3b222980e1a599f98 --- /dev/null +++ b/frame/utility/src/default_weights.rs @@ -0,0 +1,34 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn batch(c: u32, ) -> Weight { + (16461000 as Weight) + .saturating_add((1982000 as Weight).saturating_mul(c as Weight)) + } + // WARNING! Some components were not used: ["u"] + fn as_derivative() -> Weight { + (4086000 as Weight) + } +} diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index cf2ea9119b9733b6f7c5bd00352993e9e6f0f831..c39526ac0a7dfb9cae5f9543ac30e8481ff98332 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -69,15 +69,11 @@ use sp_runtime::{DispatchError, DispatchResult, traits::Dispatchable}; mod tests; mod benchmarking; +mod default_weights; pub trait WeightInfo { fn batch(c: u32, ) -> Weight; - fn as_derivative(u: u32, ) -> Weight; -} - -impl WeightInfo for () { - fn batch(_c: u32, ) -> Weight { 1_000_000_000 } - fn as_derivative(_u: u32, ) -> Weight { 1_000_000_000 } + fn as_derivative() -> Weight; } /// Configuration trait. @@ -102,7 +98,7 @@ decl_event! { /// Events type. pub enum Event { /// Batch of dispatches did not complete fully. Index of first failing dispatch given, as - /// well as the error. [index, error] + /// well as the error. \[index, error\] BatchInterrupted(u32, DispatchError), /// Batch of dispatches completed fully with no error. BatchCompleted, @@ -145,7 +141,8 @@ decl_module! { #[weight = ( calls.iter() .map(|call| call.get_dispatch_info().weight) - .fold(15_000_000, |a: Weight, n| a.saturating_add(n).saturating_add(1_000_000)), + .fold(0, |total: Weight, weight: Weight| total.saturating_add(weight)) + .saturating_add(T::WeightInfo::batch(calls.len() as u32)), { let all_operational = calls.iter() .map(|call| call.get_dispatch_info().class) @@ -186,13 +183,9 @@ decl_module! { /// NOTE: Prior to version *12, this was called `as_limited_sub`. /// /// The dispatch origin for this call must be _Signed_. - /// - /// # - /// - Base weight: 2.861 µs - /// - Plus the weight of the `call` - /// # #[weight = ( - call.get_dispatch_info().weight.saturating_add(3_000_000), + T::WeightInfo::as_derivative() + .saturating_add(call.get_dispatch_info().weight), call.get_dispatch_info().class, )] fn as_derivative(origin, index: u16, call: Box<::Call>) -> DispatchResult { diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index 6de70506e452cb15f0c77501d20dcf18788cd070..8e693b234a93952d7d5909c72e9539155311176c 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -54,7 +54,7 @@ impl_outer_dispatch! { pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockWeight: Weight = Weight::max_value(); pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); } @@ -79,7 +79,7 @@ impl frame_system::Trait for Test { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); @@ -89,6 +89,7 @@ parameter_types! { pub const ExistentialDeposit: u64 = 1; } impl pallet_balances::Trait for Test { + type MaxLocks = (); type Balance = u64; type DustRemoval = (); type Event = TestEvent; @@ -121,6 +122,7 @@ type System = frame_system::Module; type Balances = pallet_balances::Module; type Utility = Module; +use frame_system::Call as SystemCall; use pallet_balances::Call as BalancesCall; use pallet_balances::Error as BalancesError; @@ -236,3 +238,20 @@ fn batch_early_exit_works() { assert_eq!(Balances::free_balance(2), 15); }); } + +#[test] +fn batch_weight_calculation_doesnt_overflow() { + new_test_ext().execute_with(|| { + let big_call = Call::System(SystemCall::fill_block(Perbill::from_percent(50))); + assert_eq!(big_call.get_dispatch_info().weight, Weight::max_value() / 2); + + // 3 * 50% saturates to 100% + let batch_call = Call::Utility(crate::Call::batch(vec![ + big_call.clone(), + big_call.clone(), + big_call.clone(), + ])); + + assert_eq!(batch_call.get_dispatch_info().weight, Weight::max_value()); + }); +} diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index 67d796a6a980f9288b6c4cf27d1d1da2cbaed929..bea64c2b4f94d8366919102013ab97f8874aaeb0 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-vesting" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for manage vesting" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,18 +16,18 @@ targets = ["x86_64-unknown-linux-gnu"] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0-rc5", default-features = false, path = "../benchmarking", optional = true } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-io = { version = "2.0.0-rc5", path = "../../primitives/io" } -sp-core = { 
version = "2.0.0-rc5", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-rc5", path = "../balances" } -sp-storage = { version = "2.0.0-rc5", path = "../../primitives/storage" } -hex-literal = "0.2.1" +sp-io = { version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0", path = "../balances" } +sp-storage = { version = "2.0.0", path = "../../primitives/storage" } +hex-literal = "0.3.1" [features] default = ["std"] diff --git a/frame/vesting/README.md b/frame/vesting/README.md new file mode 100644 index 0000000000000000000000000000000000000000..921fa94a1a2a99a14eb6f80798f41b813d6fb7bd --- /dev/null +++ b/frame/vesting/README.md @@ -0,0 +1,31 @@ +# Vesting Module + +- [`vesting::Trait`](https://docs.rs/pallet-vesting/latest/pallet_vesting/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-vesting/latest/pallet_vesting/enum.Call.html) + +## Overview + +A simple module providing a means of placing a linear curve on an account's locked balance. This +module ensures that there is a lock in place preventing the balance to drop below the *unvested* +amount for any reason other than transaction fee payment. + +As the amount vested increases over time, the amount unvested reduces. However, locks remain in +place and explicit action is needed on behalf of the user to ensure that the amount locked is +equivalent to the amount remaining to be vested. This is done through a dispatchable function, +either `vest` (in typical case where the sender is calling on their own behalf) or `vest_other` +in case the sender is calling on another account's behalf. + +## Interface + +This module implements the `VestingSchedule` trait. + +### Dispatchable Functions + +- `vest` - Update the lock, reducing it in line with the amount "vested" so far. +- `vest_other` - Update the lock of another account, reducing it in line with the amount + "vested" so far. + +[`Call`]: ./enum.Call.html +[`Trait`]: ./trait.Trait.html + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index 24cdc28c97f20f1e45531ec5fa61d024985562d4..7c5478472f8ab326d9c8c2eedca6b22fd22585ce 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -22,13 +22,12 @@ use super::*; use frame_system::{RawOrigin, Module as System}; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use sp_runtime::traits::Bounded; use crate::Module as Vesting; const SEED: u32 = 0; -const MAX_LOCKS: u32 = 20; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -62,9 +61,9 @@ benchmarks! { _ { } vest_locked { - let l in 0 .. MAX_LOCKS; + let l in 0 .. MaxLocksOf::::get(); - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); add_locks::(&caller, l as u8); add_vesting_schedule::(&caller)?; @@ -86,9 +85,9 @@ benchmarks! { } vest_unlocked { - let l in 0 .. MAX_LOCKS; + let l in 0 .. MaxLocksOf::::get(); - let caller = account("caller", 0, SEED); + let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); add_locks::(&caller, l as u8); add_vesting_schedule::(&caller)?; @@ -110,7 +109,7 @@ benchmarks! { } vest_other_locked { - let l in 0 .. MAX_LOCKS; + let l in 0 .. 
MaxLocksOf::::get(); let other: T::AccountId = account("other", 0, SEED); let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); @@ -125,7 +124,7 @@ benchmarks! { "Vesting schedule not added", ); - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); }: vest_other(RawOrigin::Signed(caller.clone()), other_lookup) verify { // Nothing happened since everything is still vested. @@ -137,7 +136,7 @@ benchmarks! { } vest_other_unlocked { - let l in 0 .. MAX_LOCKS; + let l in 0 .. MaxLocksOf::::get(); let other: T::AccountId = account("other", 0, SEED); let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); @@ -152,7 +151,7 @@ benchmarks! { "Vesting schedule still active", ); - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); }: vest_other(RawOrigin::Signed(caller.clone()), other_lookup) verify { // Vesting schedule is removed! @@ -164,9 +163,9 @@ benchmarks! { } vested_transfer { - let l in 0 .. MAX_LOCKS; + let l in 0 .. MaxLocksOf::::get(); - let caller: T::AccountId = account("caller", 0, SEED); + let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let target: T::AccountId = account("target", 0, SEED); let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); @@ -193,6 +192,38 @@ benchmarks! { "Lock not created", ); } + + force_vested_transfer { + let l in 0 .. MaxLocksOf::::get(); + + let source: T::AccountId = account("source", 0, SEED); + let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); + T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); + // Give target existing locks + add_locks::(&target, l as u8); + + let transfer_amount = T::MinVestedTransfer::get(); + + let vesting_schedule = VestingInfo { + locked: transfer_amount, + per_block: 10.into(), + starting_block: 1.into(), + }; + }: _(RawOrigin::Root, source_lookup, target_lookup, vesting_schedule) + verify { + assert_eq!( + T::MinVestedTransfer::get(), + T::Currency::free_balance(&target), + "Transfer didn't happen", + ); + assert_eq!( + Vesting::::vesting_balance(&target), + Some(T::MinVestedTransfer::get()), + "Lock not created", + ); + } } #[cfg(test)] @@ -209,6 +240,7 @@ mod tests { assert_ok!(test_benchmark_vest_other_locked::()); assert_ok!(test_benchmark_vest_other_unlocked::()); assert_ok!(test_benchmark_vested_transfer::()); + assert_ok!(test_benchmark_force_vested_transfer::()); }); } } diff --git a/frame/vesting/src/default_weights.rs b/frame/vesting/src/default_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..dac9224d69ab0b67eec133bc39cd3ddb7260339e --- /dev/null +++ b/frame/vesting/src/default_weights.rs @@ -0,0 +1,62 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + fn vest_locked(l: u32, ) -> Weight { + (82109000 as Weight) + .saturating_add((332000 as Weight).saturating_mul(l as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn vest_unlocked(l: u32, ) -> Weight { + (88419000 as Weight) + .saturating_add((3000 as Weight).saturating_mul(l as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn vest_other_locked(l: u32, ) -> Weight { + (81277000 as Weight) + .saturating_add((321000 as Weight).saturating_mul(l as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn vest_other_unlocked(l: u32, ) -> Weight { + (87584000 as Weight) + .saturating_add((19000 as Weight).saturating_mul(l as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn vested_transfer(l: u32, ) -> Weight { + (185916000 as Weight) + .saturating_add((625000 as Weight).saturating_mul(l as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn force_vested_transfer(l: u32, ) -> Weight { + (185916000 as Weight) + .saturating_add((625000 as Weight).saturating_mul(l as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().writes(4 as Weight)) + } +} diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index c521af1e03c597a7e48aea1fd299f210d13f6849..1583b06d69f83ab29ff1ee02f1cf584d8abd9088 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -61,8 +61,10 @@ use frame_support::traits::{ use frame_system::{ensure_signed, ensure_root}; mod benchmarking; +mod default_weights; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type MaxLocksOf = <::Currency as LockableCurrency<::AccountId>>::MaxLocks; pub trait WeightInfo { fn vest_locked(l: u32, ) -> Weight; @@ -70,14 +72,7 @@ pub trait WeightInfo { fn vest_other_locked(l: u32, ) -> Weight; fn vest_other_unlocked(l: u32, ) -> Weight; fn vested_transfer(l: u32, ) -> Weight; -} - -impl WeightInfo for () { - fn vest_locked(_l: u32, ) -> Weight { 1_000_000_000 } - fn vest_unlocked(_l: u32, ) -> Weight { 1_000_000_000 } - fn vest_other_locked(_l: u32, ) -> Weight { 1_000_000_000 } - fn vest_other_unlocked(_l: u32, ) -> Weight { 1_000_000_000 } - fn vested_transfer(_l: u32, ) -> Weight { 1_000_000_000 } + fn force_vested_transfer(l: u32, ) -> Weight; } pub trait Trait: frame_system::Trait { @@ -171,10 +166,10 @@ decl_storage! { decl_event!( pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { /// The amount vested has been updated. This could indicate more funds are available. The - /// balance given is the amount which is left unvested (and thus locked). - /// [account, unvested] + /// balance given is the amount which is left unvested (and thus locked). + /// \[account, unvested\] VestingUpdated(AccountId, Balance), - /// An [account] has become fully vested. No further vesting can happen. 
+ /// An \[account\] has become fully vested. No further vesting can happen. VestingCompleted(AccountId), } ); @@ -213,12 +208,10 @@ decl_module! { /// - DbWeight: 2 Reads, 2 Writes /// - Reads: Vesting Storage, Balances Locks, [Sender Account] /// - Writes: Vesting Storage, Balances Locks, [Sender Account] - /// - Benchmark: - /// - Unlocked: 48.76 + .048 * l µs (min square analysis) - /// - Locked: 44.43 + .284 * l µs (min square analysis) - /// - Using 50 µs fixed. Assuming less than 50 locks on any user, else we may want factor in number of locks. /// # - #[weight = 50_000_000 + T::DbWeight::get().reads_writes(2, 2)] + #[weight = T::WeightInfo::vest_locked(MaxLocksOf::::get()) + .max(T::WeightInfo::vest_unlocked(MaxLocksOf::::get())) + ] fn vest(origin) -> DispatchResult { let who = ensure_signed(origin)?; Self::update_lock(who) @@ -238,12 +231,10 @@ decl_module! { /// - DbWeight: 3 Reads, 3 Writes /// - Reads: Vesting Storage, Balances Locks, Target Account /// - Writes: Vesting Storage, Balances Locks, Target Account - /// - Benchmark: - /// - Unlocked: 44.3 + .294 * l µs (min square analysis) - /// - Locked: 48.16 + .103 * l µs (min square analysis) - /// - Using 50 µs fixed. Assuming less than 50 locks on any user, else we may want factor in number of locks. /// # - #[weight = 50_000_000 + T::DbWeight::get().reads_writes(3, 3)] + #[weight = T::WeightInfo::vest_other_locked(MaxLocksOf::::get()) + .max(T::WeightInfo::vest_other_unlocked(MaxLocksOf::::get())) + ] fn vest_other(origin, target: ::Source) -> DispatchResult { ensure_signed(origin)?; Self::update_lock(T::Lookup::lookup(target)?) @@ -264,10 +255,8 @@ decl_module! { /// - DbWeight: 3 Reads, 3 Writes /// - Reads: Vesting Storage, Balances Locks, Target Account, [Sender Account] /// - Writes: Vesting Storage, Balances Locks, Target Account, [Sender Account] - /// - Benchmark: 100.3 + .365 * l µs (min square analysis) - /// - Using 100 µs fixed. Assuming less than 50 locks on any user, else we may want factor in number of locks. /// # - #[weight = 100_000_000 + T::DbWeight::get().reads_writes(3, 3)] + #[weight = T::WeightInfo::vested_transfer(MaxLocksOf::::get())] pub fn vested_transfer( origin, target: ::Source, @@ -303,10 +292,8 @@ decl_module! { /// - DbWeight: 4 Reads, 4 Writes /// - Reads: Vesting Storage, Balances Locks, Target Account, Source Account /// - Writes: Vesting Storage, Balances Locks, Target Account, Source Account - /// - Benchmark: 100.3 + .365 * l µs (min square analysis) - /// - Using 100 µs fixed. Assuming less than 50 locks on any user, else we may want factor in number of locks. /// # - #[weight = 100_000_000 + T::DbWeight::get().reads_writes(4, 4)] + #[weight = T::WeightInfo::force_vested_transfer(MaxLocksOf::::get())] pub fn force_vested_transfer( origin, source: ::Source, @@ -457,18 +444,22 @@ mod tests { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); } + parameter_types! { + pub const MaxLocks: u32 = 10; + } impl pallet_balances::Trait for Test { type Balance = u64; type DustRemoval = (); type Event = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type MaxLocks = MaxLocks; type WeightInfo = (); } parameter_types! 
{ diff --git a/primitives/allocator/Cargo.toml b/primitives/allocator/Cargo.toml index d38eb9aa51f0a2fb7a3fd01da7155118b9259c20..93991a4aeb2ab5969944d41a4a0d0093e48b977e 100644 --- a/primitives/allocator/Cargo.toml +++ b/primitives/allocator/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-allocator" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,14 +8,15 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Collection of allocator implementations." documentation = "https://docs.rs/sp-allocator" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0-rc5", path = "../std", default-features = false } -sp-core = { version = "2.0.0-rc5", path = "../core", default-features = false } -sp-wasm-interface = { version = "2.0.0-rc5", path = "../wasm-interface", default-features = false } +sp-std = { version = "2.0.0", path = "../std", default-features = false } +sp-core = { version = "2.0.0", path = "../core", default-features = false } +sp-wasm-interface = { version = "2.0.0", path = "../wasm-interface", default-features = false } log = { version = "0.4.8", optional = true } derive_more = { version = "0.99.2", optional = true } diff --git a/primitives/allocator/README.md b/primitives/allocator/README.md new file mode 100644 index 0000000000000000000000000000000000000000..361feaae591f903caad2ff1909e173f6a2240c0c --- /dev/null +++ b/primitives/allocator/README.md @@ -0,0 +1,6 @@ +Collection of allocator implementations. + +This crate provides the following allocator implementations: +- A freeing-bump allocator: [`FreeingBumpHeapAllocator`](freeing_bump::FreeingBumpHeapAllocator) + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index 8ad9fdfdbb730f6aa350d02a966dd5802553710e..a3c480e92135f3b3347f1726f396c3a7cde8a8eb 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -1,28 +1,29 @@ [package] name = "sp-api" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate runtime api primitives" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-api-proc-macro = { version = "2.0.0-rc5", path = "proc-macro" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } -sp-version = { version = "2.0.0-rc5", default-features = false, path = "../version" } -sp-state-machine = { version = "0.8.0-rc5", optional = true, path = "../../primitives/state-machine" } +sp-api-proc-macro = { version = "2.0.0", path = "proc-macro" } +sp-core = { version = "2.0.0", default-features = false, path = "../core" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +sp-version = { version = "2.0.0", default-features = false, path = "../version" } +sp-state-machine = { version = "0.8.0", optional = true, path = 
"../../primitives/state-machine" } hash-db = { version = "0.15.2", optional = true } [dev-dependencies] -sp-test-primitives = { version = "2.0.0-rc5", path = "../test-primitives" } +sp-test-primitives = { version = "2.0.0", path = "../test-primitives" } [features] default = [ "std" ] diff --git a/primitives/api/README.md b/primitives/api/README.md new file mode 100644 index 0000000000000000000000000000000000000000..551de2f82e3659300fa4bad2f09cf5f373e794fc --- /dev/null +++ b/primitives/api/README.md @@ -0,0 +1,17 @@ +Substrate runtime api + +The Substrate runtime api is the crucial interface between the node and the runtime. +Every call that goes into the runtime is done with a runtime api. The runtime apis are not fixed. +Every Substrate user can define its own apis with +[`decl_runtime_apis`](macro.decl_runtime_apis.html) and implement them in +the runtime with [`impl_runtime_apis`](macro.impl_runtime_apis.html). + +Every Substrate runtime needs to implement the [`Core`] runtime api. This api provides the basic +functionality that every runtime needs to export. + +Besides the macros and the [`Core`] runtime api, this crates provides the [`Metadata`] runtime +api, the [`ApiExt`] trait, the [`CallApiAt`] trait and the [`ConstructRuntimeApi`] trait. + +On a meta level this implies, the client calls the generated API from the client perspective. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index e267b86c8eda1460f879c1a06cdd27b65a230e17..9b1661cf5ef60bd233c8b44eaf6a92cafa297ed1 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api-proc-macro" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index 6b4b82e9a9e8a4f4ac0d80ff58c6e7ae115a5975..867cdd6e57e48f6b63f7739be91c4fb49db759ea 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api-test" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,22 +12,22 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0-rc5", path = "../" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } -sp-version = { version = "2.0.0-rc5", path = "../../version" } -sp-runtime = { version = "2.0.0-rc5", path = "../../runtime" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../blockchain" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../../client/block-builder" } +sp-api = { version = "2.0.0", path = "../" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } +sp-version = { version = "2.0.0", path = "../../version" } +sp-runtime = { version = "2.0.0", path = "../../runtime" } +sp-blockchain = { version = "2.0.0", path = "../../blockchain" } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sc-block-builder = { version = "0.8.0", path = "../../../client/block-builder" } codec = { package = "parity-scale-codec", version = "1.3.1" } -sp-state-machine = { version = "0.8.0-rc5", path = 
"../../../primitives/state-machine" } +sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } trybuild = "1.0.17" rustversion = "1.0.0" [dev-dependencies] criterion = "0.3.0" -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } -sp-core = { version = "2.0.0-rc5", path = "../../core" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } +sp-core = { version = "2.0.0", path = "../../core" } [[bench]] name = "bench" diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index 46f138fccd5ecdd1c57c7e24d6ccacd1bf7472dc..851d2b8a4b652b85833d2f83b645511dc693decb 100644 --- a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -19,7 +19,7 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr 14 | | } 15 | | } | |_- type in trait -16 | +16 | 17 | sp_api::impl_runtime_apis! { | ^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found struct `std::string::String` | diff --git a/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/primitives/api/test/tests/ui/mock_only_self_reference.stderr index 6d1ac0e9a25636bb2645037c5925b092422ad3d1..ed5b64144a6f65e19d3072a15288d5e12b3f4df8 100644 --- a/primitives/api/test/tests/ui/mock_only_self_reference.stderr +++ b/primitives/api/test/tests/ui/mock_only_self_reference.stderr @@ -24,8 +24,8 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr 12 | sp_api::mock_impl_runtime_apis! { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `()` | - = note: expected fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option, std::vec::Vec<_>) -> std::result::Result<_, _>` - found fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option<()>, std::vec::Vec<_>) -> std::result::Result<_, _>` + = note: expected fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime_client::substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option, std::vec::Vec<_>) -> std::result::Result<_, _>` + found fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime_client::substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option<()>, std::vec::Vec<_>) -> std::result::Result<_, _>` = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for trait @@ -42,6 +42,6 @@ error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for t 12 | sp_api::mock_impl_runtime_apis! 
{ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `()` | - = note: expected fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option, std::vec::Vec<_>) -> std::result::Result<_, _>` - found fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option<()>, std::vec::Vec<_>) -> std::result::Result<_, _>` + = note: expected fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime_client::substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option, std::vec::Vec<_>) -> std::result::Result<_, _>` + found fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime_client::substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option<()>, std::vec::Vec<_>) -> std::result::Result<_, _>` = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index cc2a5f05cd5ffea8251c8b280eae656ddf550d4f..c3e4850036090ddca3a06bd8c58f82ff78fc23e3 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -19,7 +19,7 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr 14 | | } 15 | | } | |_- type in trait -16 | +16 | 17 | sp_api::impl_runtime_apis! { | ^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `&u64` | diff --git a/primitives/application-crypto/Cargo.toml b/primitives/application-crypto/Cargo.toml index 4366894dcf352f1706daacdc09287d52ecbf097d..2ab6823759572a5502775c01792d170dcdcdae45 100644 --- a/primitives/application-crypto/Cargo.toml +++ b/primitives/application-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-application-crypto" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" description = "Provides facilities for generating application specific crypto wrapper types." 
@@ -8,17 +8,18 @@ license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sp-application-crypto" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } +sp-core = { version = "2.0.0", default-features = false, path = "../core" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } [features] default = [ "std" ] diff --git a/primitives/application-crypto/README.md b/primitives/application-crypto/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c86e33552f60605d5ca8e9db7e48621f24060f2f --- /dev/null +++ b/primitives/application-crypto/README.md @@ -0,0 +1,3 @@ +Traits and macros for constructing application specific strongly typed crypto wrappers. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/application-crypto/test/Cargo.toml b/primitives/application-crypto/test/Cargo.toml index 554e4d2532027fed46748a122db686f868e37e93..6ccec6943a7f692ce87f2f81ab5a2804177532a2 100644 --- a/primitives/application-crypto/test/Cargo.toml +++ b/primitives/application-crypto/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-application-crypto-test" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" description = "Integration tests for application-crypto" @@ -13,8 +13,8 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../core" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../test-utils/runtime/client" } -sp-runtime = { version = "2.0.0-rc5", path = "../../runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../api" } -sp-application-crypto = { version = "2.0.0-rc5", path = "../" } +sp-core = { version = "2.0.0", default-features = false, path = "../../core" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } +sp-runtime = { version = "2.0.0", path = "../../runtime" } +sp-api = { version = "2.0.0", path = "../../api" } +sp-application-crypto = { version = "2.0.0", path = "../" } diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index 83963d0c984c158f8a982fadb2ae1595b568380e..b8e482491a7d4277f8dbce263ff8b75a528f6e95 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-arithmetic" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Minimal fixed point arithmetic primitives and types for runtime." 
documentation = "https://docs.rs/sp-arithmetic" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,9 +18,9 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } integer-sqrt = "0.1.2" num-traits = { version = "0.2.8", default-features = false } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-debug-derive = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/debug-derive" } +sp-debug-derive = { version = "2.0.0", default-features = false, path = "../../primitives/debug-derive" } [dev-dependencies] rand = "0.7.2" diff --git a/primitives/arithmetic/README.md b/primitives/arithmetic/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e6e52c2a82696ba42e756458fff5839d2d88c09f --- /dev/null +++ b/primitives/arithmetic/README.md @@ -0,0 +1,3 @@ +Minimal fixed point arithmetic primitives and types for runtime. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index e82821aebafae706d5db1241f99de692d09a306b..2e291c5b11a9b82295c09419da7136251606b9c0 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-arithmetic-fuzzer" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,7 +14,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-arithmetic = { version = "2.0.0-rc5", path = ".." } +sp-arithmetic = { version = "2.0.0", path = ".." } honggfuzz = "0.5.49" primitive-types = "0.7.0" num-bigint = "0.2" diff --git a/primitives/arithmetic/src/biguint.rs b/primitives/arithmetic/src/biguint.rs index 41e2c759a59679a25eda9d42655eee83559cd4b4..1fed54f598eec73f581ff6c7b96fc1726e9eecc4 100644 --- a/primitives/arithmetic/src/biguint.rs +++ b/primitives/arithmetic/src/biguint.rs @@ -18,7 +18,7 @@ //! Infinite precision unsigned integer for substrate runtime. use num_traits::Zero; -use sp_std::{cmp::Ordering, ops, prelude::*, cell::RefCell, convert::TryFrom}; +use sp_std::{cmp::Ordering, ops, prelude::*, vec, cell::RefCell, convert::TryFrom}; // A sensible value for this would be half of the dword size of the host machine. Since the // runtime is compiled to 32bit webassembly, using 32 and 64 for single and double respectively diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index cf53988b33d31c4103714433c82a29d65eedd05e..035a704ba3009dfe622f812e2271b6906f7c88c3 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -323,9 +323,28 @@ macro_rules! implement_per_thing { /// #[doc = $title] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] - #[derive(Encode, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, RuntimeDebug, CompactAs)] + #[derive(Encode, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, RuntimeDebug)] pub struct $name($type); + /// Implementation makes any compact encoding of `PerThing::Inner` valid, + /// when decoding it will saturate up to `PerThing::ACCURACY`. 
+ impl CompactAs for $name { + type As = $type; + fn encode_as(&self) -> &Self::As { + &self.0 + } + fn decode_from(x: Self::As) -> Self { + // Saturates if `x` is more than `$max` internally. + Self::from_parts(x) + } + } + + impl From> for $name { + fn from(x: codec::Compact<$name>) -> $name { + x.0 + } + } + impl PerThing for $name { type Inner = $type; type Upper = $upper_type; @@ -1166,6 +1185,17 @@ macro_rules! implement_per_thing { // deconstruct is also const, hence it can be called in const rhs. const C5: bool = C1.deconstruct() == 0; } + + #[test] + fn compact_decoding_saturate_when_beyond_accuracy() { + use num_traits::Bounded; + use codec::Compact; + + let p = Compact::<$name>::decode(&mut &Compact(<$type>::max_value()).encode()[..]) + .unwrap(); + assert_eq!((p.0).0, $max); + assert_eq!($name::from(p), $name::max_value()); + } } }; } diff --git a/primitives/authority-discovery/Cargo.toml b/primitives/authority-discovery/Cargo.toml index 4bce99247fe940d0d57b536aa3a220f976e7587e..ae373f1866ff6d61285e542a0643d020e52fa9f4 100644 --- a/primitives/authority-discovery/Cargo.toml +++ b/primitives/authority-discovery/Cargo.toml @@ -1,22 +1,23 @@ [package] name = "sp-authority-discovery" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] description = "Authority discovery primitives" edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../application-crypto" } +sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } codec = { package = "parity-scale-codec", default-features = false, version = "1.3.1" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-api = { version = "2.0.0", default-features = false, path = "../api" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } [features] default = ["std"] diff --git a/primitives/authority-discovery/README.md b/primitives/authority-discovery/README.md new file mode 100644 index 0000000000000000000000000000000000000000..65c2e22dde004483fdcff27d9a4b4392753bff3a --- /dev/null +++ b/primitives/authority-discovery/README.md @@ -0,0 +1,3 @@ +Runtime Api to help discover authorities. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/authority-discovery/src/lib.rs b/primitives/authority-discovery/src/lib.rs index 8903a7f3837553806912770243bf6b18e3a3aadf..0ae47c9758ee688dabae541a9266c6a55e4209f7 100644 --- a/primitives/authority-discovery/src/lib.rs +++ b/primitives/authority-discovery/src/lib.rs @@ -45,9 +45,9 @@ sp_api::decl_runtime_apis! { /// The authority discovery api. /// /// This api is used by the `client/authority-discovery` module to retrieve identifiers - /// of the current authority set. + /// of the current and next authority set. pub trait AuthorityDiscoveryApi { - /// Retrieve authority identifiers of the current authority set. + /// Retrieve authority identifiers of the current and next authority set. 
fn authorities() -> Vec; } } diff --git a/primitives/authorship/Cargo.toml b/primitives/authorship/Cargo.toml index 514bde6a55308622674d35b656692fae68256ae3..b6f463029077f49375cda5ebe959abeb45d5ba06 100644 --- a/primitives/authorship/Cargo.toml +++ b/primitives/authorship/Cargo.toml @@ -1,20 +1,21 @@ [package] name = "sp-authorship" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] description = "Authorship primitives" edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../inherents" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] diff --git a/primitives/authorship/README.md b/primitives/authorship/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1aa1805cfc5e7740a55d416d66a8e1dbdcbedb3c --- /dev/null +++ b/primitives/authorship/README.md @@ -0,0 +1,3 @@ +Authorship Primitives + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/block-builder/Cargo.toml b/primitives/block-builder/Cargo.toml index f9a52b7505bc8eea3d151743d4cc07b7ce6a6d07..767307c2a842af6db4b6c78afb4f02bb55baaeae 100644 --- a/primitives/block-builder/Cargo.toml +++ b/primitives/block-builder/Cargo.toml @@ -1,22 +1,23 @@ [package] name = "sp-block-builder" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "The block builder runtime api." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../api" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0", default-features = false, path = "../api" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../inherents" } +sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } [features] default = [ "std" ] diff --git a/primitives/block-builder/README.md b/primitives/block-builder/README.md new file mode 100644 index 0000000000000000000000000000000000000000..433197d3be9e4104606809bab2834e16e8bcf065 --- /dev/null +++ b/primitives/block-builder/README.md @@ -0,0 +1,3 @@ +The block builder runtime api. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index 7d77ae2faa7260f21a27a1283eb5b94b1dc2e7c8..79c0b56616fac75700fc1b5111425c5175c0bc83 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-blockchain" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate blockchain traits and primitives." documentation = "https://docs.rs/sp-blockchain" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,8 +19,8 @@ lru = "0.4.0" parking_lot = "0.10.0" derive_more = "0.99.2" codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-consensus = { version = "0.8.0-rc5", path = "../consensus/common" } -sp-runtime = { version = "2.0.0-rc5", path = "../runtime" } -sp-block-builder = { version = "2.0.0-rc5", path = "../block-builder" } -sp-state-machine = { version = "0.8.0-rc5", path = "../state-machine" } -sp-database = { version = "2.0.0-rc5", path = "../database" } +sp-consensus = { version = "0.8.0", path = "../consensus/common" } +sp-runtime = { version = "2.0.0", path = "../runtime" } +sp-block-builder = { version = "2.0.0", path = "../block-builder" } +sp-state-machine = { version = "0.8.0", path = "../state-machine" } +sp-database = { version = "2.0.0", path = "../database" } diff --git a/primitives/blockchain/README.md b/primitives/blockchain/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8298bfd7ae60a5d9a6be9c24ae4c7831e72f2bd0 --- /dev/null +++ b/primitives/blockchain/README.md @@ -0,0 +1,3 @@ +Substrate blockchain traits and primitives. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/chain-spec/Cargo.toml b/primitives/chain-spec/Cargo.toml index 20915d2f2fabfb80e446269c3b9e424e36532972..a94bd8ad0139ea4fe7c3eb8421bfebd61fb577c5 100644 --- a/primitives/chain-spec/Cargo.toml +++ b/primitives/chain-spec/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "sp-chain-spec" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate chain configurations types." +readme = "README.md" [dependencies] serde = { version = "1.0.101", features = ["derive"] } diff --git a/primitives/chain-spec/README.md b/primitives/chain-spec/README.md new file mode 100644 index 0000000000000000000000000000000000000000..375f14a441ab606693d987b9f21567d66a865e62 --- /dev/null +++ b/primitives/chain-spec/README.md @@ -0,0 +1,3 @@ +Types and traits related to chain specifications. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index 9ed9e840d1c9ad352507981811f1bebc79979f56..7ef5f67350baa5283552d3b07c950b36f6aba2b7 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -1,24 +1,25 @@ [package] name = "sp-consensus-aura" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "Primitives for Aura consensus" edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../application-crypto" } +sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../std" } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../api" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../runtime" } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../inherents" } -sp-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../timestamp" } +sp-std = { version = "2.0.0", default-features = false, path = "../../std" } +sp-api = { version = "2.0.0", default-features = false, path = "../../api" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } +sp-inherents = { version = "2.0.0", default-features = false, path = "../../inherents" } +sp-timestamp = { version = "2.0.0", default-features = false, path = "../../timestamp" } [features] default = ["std"] diff --git a/primitives/consensus/aura/README.md b/primitives/consensus/aura/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0f360ae67eb28a9145ad4c83980f6dff9a9d22f1 --- /dev/null +++ b/primitives/consensus/aura/README.md @@ -0,0 +1,3 @@ +Primitives for Aura. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index de540ebb106d52bc4bc8b9412ac50766a10d25dd..662f2ded506a50065b524205611cbe95f33bbb92 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -1,29 +1,30 @@ [package] name = "sp-consensus-babe" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "Primitives for BABE consensus" edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../application-crypto" } +sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } merlin = { version = "2.0", default-features = false } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../std" } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../api" } -sp-consensus = { version = "0.8.0-rc5", optional = true, path = "../common" } -sp-consensus-slots = { version = "0.8.0-rc5", default-features = false, path = "../slots" } -sp-consensus-vrf = { version = "0.8.0-rc5", path = "../vrf", default-features = false } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../core" } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../inherents" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../runtime" } -sp-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../timestamp" } +sp-std = { version = "2.0.0", default-features = false, path = "../../std" } +sp-api = { version = "2.0.0", default-features = false, path = "../../api" } +sp-consensus = { version = "0.8.0", optional = true, path = "../common" } +sp-consensus-slots = { version = "0.8.0", default-features = false, path = "../slots" } +sp-consensus-vrf = { version = "0.8.0", path = "../vrf", default-features = false } +sp-core = { version = "2.0.0", default-features = false, path = "../../core" } +sp-inherents = { version = "2.0.0", default-features = false, path = "../../inherents" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } +sp-timestamp = { version = "2.0.0", default-features = false, path = "../../timestamp" } [features] default = ["std"] diff --git a/primitives/consensus/babe/README.md b/primitives/consensus/babe/README.md new file mode 100644 index 0000000000000000000000000000000000000000..54bae05fd6db72a167bc8fd687e2d6a91a8a47ba --- /dev/null +++ b/primitives/consensus/babe/README.md @@ -0,0 +1,3 @@ +Primitives for BABE. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index d8c5073274d75888bb1832e120c158620b0edb8d..a6f8c01928d96f4e9a8b1d07dddb7f7afdef1dd2 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Common utilities for building and using consensus engines in substrate." documentation = "https://docs.rs/sp-consensus/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,27 +16,27 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -libp2p = { version = "0.23.0", default-features = false } +libp2p = { version = "0.28.1", default-features = false } log = "0.4.8" -sp-core = { path= "../../core", version = "2.0.0-rc5"} -sp-inherents = { version = "2.0.0-rc5", path = "../../inherents" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-machine" } +sp-core = { path= "../../core", version = "2.0.0"} +sp-inherents = { version = "2.0.0", path = "../../inherents" } +sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } futures = { version = "0.3.1", features = ["thread-pool"] } futures-timer = "3.0.1" -sp-std = { version = "2.0.0-rc5", path = "../../std" } -sp-version = { version = "2.0.0-rc5", path = "../../version" } -sp-runtime = { version = "2.0.0-rc5", path = "../../runtime" } -sp-utils = { version = "2.0.0-rc5", path = "../../utils" } -sp-trie = { version = "2.0.0-rc5", path = "../../trie" } -sp-api = { version = "2.0.0-rc5", path = "../../api" } +sp-std = { version = "2.0.0", path = "../../std" } +sp-version = { version = "2.0.0", path = "../../version" } +sp-runtime = { version = "2.0.0", path = "../../runtime" } +sp-utils = { version = "2.0.0", path = "../../utils" } +sp-trie = { version = "2.0.0", path = "../../trie" } +sp-api = { version = "2.0.0", path = "../../api" } codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } parking_lot = "0.10.0" serde = { version = "1.0", features = ["derive"] } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0-rc5"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0"} wasm-timer = "0.2.4" [dev-dependencies] -sp-test-primitives = { version = "2.0.0-rc5", path = "../../test-primitives" } +sp-test-primitives = { version = "2.0.0", path = "../../test-primitives" } [features] default = [] diff --git a/primitives/consensus/common/README.md b/primitives/consensus/common/README.md new file mode 100644 index 0000000000000000000000000000000000000000..963bb0fbdba4af4de706c5303003f8036016291d --- /dev/null +++ b/primitives/consensus/common/README.md @@ -0,0 +1,7 @@ +Common utilities for building and using consensus engines in substrate. + +Much of this crate is _unstable_ and thus the API is likely to undergo +change. Implementors of traits should not rely on the interfaces to remain +the same. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 04b65a723e4a8d4c92997de8ba0e681564b936ad..fa4f233c680facb91dfd05b8b9a151d803429eb5 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -212,6 +212,7 @@ pub trait CanAuthorWith { /// Checks if the node can author blocks by using /// [`NativeVersion::can_author_with`](sp_version::NativeVersion::can_author_with). +#[derive(Clone)] pub struct CanAuthorWithNativeVersion(T); impl CanAuthorWithNativeVersion { @@ -239,6 +240,7 @@ impl, Block: BlockT> CanAuthorWith CanAuthorWith for AlwaysCanAuthor { @@ -247,6 +249,16 @@ impl CanAuthorWith for AlwaysCanAuthor { } } +/// Never can author. +#[derive(Clone)] +pub struct NeverCanAuthor; + +impl CanAuthorWith for NeverCanAuthor { + fn can_author_with(&self, _: &BlockId) -> Result<(), String> { + Err("Authoring is always disabled.".to_string()) + } +} + /// A type from which a slot duration can be obtained. pub trait SlotData { /// Gets the slot duration. diff --git a/primitives/consensus/pow/Cargo.toml b/primitives/consensus/pow/Cargo.toml index 598ff0ecb2949c1d983d125d6d58e17e04684b4a..cbcea886a709569db9564e58a3ba300c4d67fb8b 100644 --- a/primitives/consensus/pow/Cargo.toml +++ b/primitives/consensus/pow/Cargo.toml @@ -1,21 +1,22 @@ [package] name = "sp-consensus-pow" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "Primitives for Aura consensus" edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../api" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../runtime" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../core" } +sp-api = { version = "2.0.0", default-features = false, path = "../../api" } +sp-std = { version = "2.0.0", default-features = false, path = "../../std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } +sp-core = { version = "2.0.0", default-features = false, path = "../../core" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] diff --git a/primitives/consensus/pow/README.md b/primitives/consensus/pow/README.md new file mode 100644 index 0000000000000000000000000000000000000000..881864377649804e610fddecce9e9a3eb8624856 --- /dev/null +++ b/primitives/consensus/pow/README.md @@ -0,0 +1,3 @@ +Primitives for Substrate Proof-of-Work (PoW) consensus. 
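Stepping back to the `consensus/common/src/lib.rs` hunk above: the new `NeverCanAuthor` is the mirror image of the existing `AlwaysCanAuthor`. A minimal, illustrative sketch of the two side by side (assuming `sp-test-primitives` for a concrete `Block`, as in this crate's dev-dependencies):

use sp_consensus::{AlwaysCanAuthor, CanAuthorWith, NeverCanAuthor};
use sp_runtime::generic::BlockId;
use sp_test_primitives::Block;

fn authoring_checks(at: BlockId<Block>) {
    // `AlwaysCanAuthor` never rejects; useful when native authoring checks are irrelevant.
    assert!(AlwaysCanAuthor.can_author_with(&at).is_ok());
    // `NeverCanAuthor` always returns an error, so authoring is unconditionally disabled.
    assert!(NeverCanAuthor.can_author_with(&at).is_err());
}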
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/consensus/slots/Cargo.toml b/primitives/consensus/slots/Cargo.toml index ebf2be94a3fb4b484ab3840f79b47d0df70dd524..e605d585b72299aaef74df7eaf386aa1f4a4fbba 100644 --- a/primitives/consensus/slots/Cargo.toml +++ b/primitives/consensus/slots/Cargo.toml @@ -1,19 +1,20 @@ [package] name = "sp-consensus-slots" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "Primitives for slots-based consensus" edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0-rc2", default-features = false, path = "../../runtime" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } [features] default = ["std"] diff --git a/primitives/consensus/slots/README.md b/primitives/consensus/slots/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f451c32888a47843c1f679a17d8db06d23811272 --- /dev/null +++ b/primitives/consensus/slots/README.md @@ -0,0 +1,3 @@ +Primitives for slots-based consensus engines. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/consensus/vrf/Cargo.toml b/primitives/consensus/vrf/Cargo.toml index b5934a8a263889bb4f2910187fa90844d2901ce1..d0b7d2e2f7aa8da3f4b3de335b16122b6bb8e2d8 100644 --- a/primitives/consensus/vrf/Cargo.toml +++ b/primitives/consensus/vrf/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "sp-consensus-vrf" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "Primitives for VRF based consensus" edition = "2018" license = "Apache-2.0" repository = "https://github.com/paritytech/substrate/" homepage = "https://substrate.dev" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,9 +15,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { version = "1.0.0", package = "parity-scale-codec", default-features = false } schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } -sp-std = { version = "2.0.0-rc5", path = "../../std", default-features = false } -sp-core = { version = "2.0.0-rc5", path = "../../core", default-features = false } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../runtime" } +sp-std = { version = "2.0.0", path = "../../std", default-features = false } +sp-core = { version = "2.0.0", path = "../../core", default-features = false } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } [features] default = ["std"] diff --git a/primitives/consensus/vrf/README.md b/primitives/consensus/vrf/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d66490e023b3869c7d3c5a20c86dc5f69ba8a8ef --- /dev/null +++ b/primitives/consensus/vrf/README.md @@ -0,0 +1,3 @@ +Primitives for VRF-based consensus engines. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 6787efbd845da21af029aed681d093764dd98e56..a58bb663628081c1094f06ae8b3b407c055c4179 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-core" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } log = { version = "0.4.8", default-features = false } serde = { version = "1.0.101", optional = true, features = ["derive"] } @@ -26,7 +26,7 @@ hash-db = { version = "0.15.2", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } base58 = { version = "0.1.0", optional = true } rand = { version = "0.7.3", optional = true, features = ["small_rng"] } -substrate-bip39 = { version = "0.4.1", optional = true } +substrate-bip39 = { version = "0.4.2", optional = true } tiny-bip39 = { version = "0.7", optional = true } regex = { version = "1.3.1", optional = true } num-traits = { version = "0.2.8", default-features = false } @@ -34,9 +34,9 @@ zeroize = { version = "1.0.0", default-features = false } secrecy = { version = "0.6.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false, optional = true } parking_lot = { version = "0.10.0", optional = true } -sp-debug-derive = { version = "2.0.0-rc5", path = "../debug-derive" } -sp-externalities = { version = "0.8.0-rc5", optional = true, path = "../externalities" } -sp-storage = { version = "2.0.0-rc5", default-features = false, path = "../storage" } +sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } +sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } +sp-storage = { version = "2.0.0", default-features = false, path = "../storage" } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } futures = { version = "0.3.1", optional = true } dyn-clonable = { version = "0.9.0", optional = true } @@ -52,14 +52,14 @@ twox-hash = { version = "1.5.0", default-features = false, optional = true } libsecp256k1 = { version = "0.3.2", default-features = false, features = ["hmac"], optional = true } merlin = { version = "2.0", default-features = false, optional = true } -sp-runtime-interface = { version = "2.0.0-rc5", default-features = false, path = "../runtime-interface" } +sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../runtime-interface" } [dev-dependencies] -sp-serializer = { version = "2.0.0-rc5", path = "../serializer" } +sp-serializer = { version = "2.0.0", path = "../serializer" } pretty_assertions = "0.6.1" -hex-literal = "0.2.1" +hex-literal = "0.3.1" rand = "0.7.2" -criterion = "0.2.11" +criterion = "0.3.3" serde_json = "1.0" rand_chacha = "0.2.2" diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index efacf0b2e76a6f4bb6738318783eb1ba7f043afd..e710f346efb31d17b1df7bbf2b066b43eb1199b3 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -265,7 +265,6 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { } 
/// Return the ss58-check string for this key. - #[cfg(feature = "std")] fn to_ss58check_with_version(&self, version: Ss58AddressFormat) -> String { let mut v = vec![version.into()]; @@ -274,9 +273,11 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { v.extend(&r.as_bytes()[0..2]); v.to_base58() } + /// Return the ss58-check string for this key. #[cfg(feature = "std")] fn to_ss58check(&self) -> String { self.to_ss58check_with_version(*DEFAULT_VERSION.lock()) } + /// Some if the string is a properly encoded SS58Check address, optionally with /// a derivation path following. #[cfg(feature = "std")] @@ -317,8 +318,7 @@ lazy_static::lazy_static! { macro_rules! ss58_address_format { ( $( $identifier:tt => ($number:expr, $name:expr, $desc:tt) )* ) => ( /// A known address (sub)format/network ID for SS58. - #[derive(Copy, Clone, PartialEq, Eq)] - #[cfg_attr(feature = "std", derive(Debug))] + #[derive(Copy, Clone, PartialEq, Eq, crate::RuntimeDebug)] pub enum Ss58AddressFormat { $(#[doc = $desc] $identifier),*, /// Use a manually provided numeric value. @@ -328,7 +328,13 @@ macro_rules! ss58_address_format { #[cfg(feature = "std")] impl std::fmt::Display for Ss58AddressFormat { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:?}", self) + match self { + $( + Ss58AddressFormat::$identifier => write!(f, "{}", $name), + )* + Ss58AddressFormat::Custom(x) => write!(f, "{}", x), + } + } } @@ -337,6 +343,12 @@ macro_rules! ss58_address_format { ]; impl Ss58AddressFormat { + /// names of all address formats + pub fn all_names() -> &'static [&'static str] { + &[ + $($name),*, + ] + } /// All known address formats. pub fn all() -> &'static [Ss58AddressFormat] { &ALL_SS58_ADDRESS_FORMATS @@ -372,7 +384,7 @@ macro_rules! ss58_address_format { Ss58AddressFormat::Custom(n) if n == x => Ok(Ss58AddressFormat::Custom(x)), _ => Err(()), } - + #[cfg(not(feature = "std"))] Err(()) }, @@ -380,17 +392,38 @@ macro_rules! ss58_address_format { } } + /// Error encountered while parsing `Ss58AddressFormat` from &'_ str + /// unit struct for now. + #[derive(Copy, Clone, PartialEq, Eq, crate::RuntimeDebug)] + pub struct ParseError; + impl<'a> TryFrom<&'a str> for Ss58AddressFormat { - type Error = (); + type Error = ParseError; - fn try_from(x: &'a str) -> Result { + fn try_from(x: &'a str) -> Result { match x { $($name => Ok(Ss58AddressFormat::$identifier)),*, - a => a.parse::().map_err(|_| ()).and_then(TryFrom::try_from), + a => a.parse::().map(Ss58AddressFormat::Custom).map_err(|_| ParseError), } } } + #[cfg(feature = "std")] + impl std::str::FromStr for Ss58AddressFormat { + type Err = ParseError; + + fn from_str(data: &str) -> Result { + Self::try_from(data) + } + } + + #[cfg(feature = "std")] + impl std::fmt::Display for ParseError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "failed to parse network value as u8") + } + } + #[cfg(feature = "std")] impl Default for Ss58AddressFormat { fn default() -> Self { @@ -401,10 +434,7 @@ macro_rules! 
ss58_address_format { #[cfg(feature = "std")] impl From for String { fn from(x: Ss58AddressFormat) -> String { - match x { - $(Ss58AddressFormat::$identifier => $name.into()),*, - Ss58AddressFormat::Custom(x) => x.to_string(), - } + x.to_string() } } ) @@ -442,10 +472,18 @@ ss58_address_format!( (13, "substratee", "Any SubstraTEE off-chain network private account (*25519).") KulupuAccount => (16, "kulupu", "Kulupu mainnet, standard account (*25519).") + DarkAccount => + (17, "dark", "Dark mainnet, standard account (*25519).") DarwiniaAccount => (18, "darwinia", "Darwinia Chain mainnet, standard account (*25519).") StafiAccount => (20, "stafi", "Stafi mainnet, standard account (*25519).") + DockTestAccount => + (21, "dock-testnet", "Dock testnet, standard account (*25519).") + DockMainAccount => + (22, "dock-mainnet", "Dock mainnet, standard account (*25519).") + ShiftNrg => + (23, "shift", "ShiftNrg mainnet, standard account (*25519).") SubsocialAccount => (28, "subsocial", "Subsocial network, standard account (*25519).") PhalaAccount => diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index c8a289639d4f2191329bf70f72574f2b21075ed5..94f6bb2967a0b77e1fdc31350edc0d37250e1b71 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -32,6 +32,7 @@ macro_rules! map { ); } +use sp_runtime_interface::pass_by::{PassByEnum, PassByInner}; use sp_std::prelude::*; use sp_std::ops::Deref; #[cfg(feature = "std")] @@ -50,6 +51,7 @@ pub use impl_serde::serialize as bytes; #[cfg(feature = "full_crypto")] pub mod hashing; + #[cfg(feature = "full_crypto")] pub use hashing::{blake2_128, blake2_256, twox_64, twox_128, twox_256, keccak_256}; pub mod hexdisplay; @@ -175,6 +177,18 @@ impl sp_std::ops::Deref for OpaqueMetadata { } } +/// Simple blob to hold a `PeerId` without committing to its format. +#[derive(Default, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, PassByInner)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub struct OpaquePeerId(pub Vec); + +impl OpaquePeerId { + /// Create new `OpaquePeerId` + pub fn new(vec: Vec) -> Self { + OpaquePeerId(vec) + } +} + /// Something that is either a native or an encoded value. #[cfg(feature = "std")] pub enum NativeOrEncoded { @@ -256,7 +270,7 @@ pub trait TypeId { /// A log level matching the one from `log` crate. /// /// Used internally by `sp_io::log` method. -#[derive(Encode, Decode, sp_runtime_interface::pass_by::PassByEnum, Copy, Clone)] +#[derive(Encode, Decode, PassByEnum, Copy, Clone)] pub enum LogLevel { /// `Error` log level. Error = 1, diff --git a/primitives/core/src/offchain/mod.rs b/primitives/core/src/offchain/mod.rs index b2ff3552135ce935a2e8d3a79c7533baa5afa117..4768496c4a50836427ca2b03bc5d679366fbf28c 100644 --- a/primitives/core/src/offchain/mod.rs +++ b/primitives/core/src/offchain/mod.rs @@ -19,7 +19,7 @@ use codec::{Encode, Decode}; use sp_std::{prelude::{Vec, Box}, convert::TryFrom}; -use crate::RuntimeDebug; +use crate::{OpaquePeerId, RuntimeDebug}; use sp_runtime_interface::pass_by::{PassByCodec, PassByInner, PassByEnum}; pub use crate::crypto::KeyTypeId; @@ -184,23 +184,12 @@ impl TryFrom for HttpRequestStatus { #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, PassByCodec)] #[cfg_attr(feature = "std", derive(Default))] pub struct OpaqueNetworkState { - /// PeerId of the local node. + /// PeerId of the local node in SCALE encoded. pub peer_id: OpaquePeerId, /// List of addresses the node knows it can be reached as. 
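Going back to the `Ss58AddressFormat` changes in `crypto.rs` above, a small sketch of the new parsing path (std builds only; `ParseError`, `FromStr` and the name table come from this patch, the rest is illustrative):

use core::convert::TryFrom;
use sp_core::crypto::Ss58AddressFormat;

fn parse_formats() {
    // Registered network names resolve to their known variant ("kulupu" maps to prefix 16).
    assert!(Ss58AddressFormat::try_from("kulupu").is_ok());
    // Bare numbers become `Custom(n)`.
    let custom: Ss58AddressFormat = "42".parse().unwrap();
    assert_eq!(custom, Ss58AddressFormat::Custom(42));
    // Anything else now fails with the dedicated `ParseError` instead of `()`.
    assert!("no-such-network".parse::<Ss58AddressFormat>().is_err());
}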
pub external_addresses: Vec, } -/// Simple blob to hold a `PeerId` without committing to its format. -#[derive(Default, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, PassByInner)] -pub struct OpaquePeerId(pub Vec); - -impl OpaquePeerId { - /// Create new `OpaquePeerId` - pub fn new(vec: Vec) -> Self { - OpaquePeerId(vec) - } -} - /// Simple blob to hold a `Multiaddr` without committing to its format. #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, PassByInner)] pub struct OpaqueMultiaddr(pub Vec); @@ -277,6 +266,8 @@ pub enum Capability { OffchainWorkerDbRead = 32, /// Access to offchain worker DB (writes). OffchainWorkerDbWrite = 64, + /// Manage the authorized nodes + NodeAuthorization = 128, } /// A set of capabilities @@ -495,6 +486,18 @@ pub trait Externalities: Send { buffer: &mut [u8], deadline: Option ) -> Result; + + /// Set the authorized nodes from runtime. + /// + /// In a permissioned network, the connections between nodes need to reach a + /// consensus between participants. + /// + /// - `nodes`: a set of nodes which are allowed to connect for the local node. + /// each one is identified with an `OpaquePeerId`, here it just use plain bytes + /// without any encoding. Invalid `OpaquePeerId`s are silently ignored. + /// - `authorized_only`: if true, only the authorized nodes are allowed to connect, + /// otherwise unauthorized nodes can also be connected through other mechanism. + fn set_authorized_nodes(&mut self, nodes: Vec, authorized_only: bool); } impl Externalities for Box { @@ -573,6 +576,10 @@ impl Externalities for Box { ) -> Result { (&mut **self).http_response_read_body(request_id, buffer, deadline) } + + fn set_authorized_nodes(&mut self, nodes: Vec, authorized_only: bool) { + (&mut **self).set_authorized_nodes(nodes, authorized_only) + } } /// An `OffchainExternalities` implementation with limited capabilities. @@ -691,6 +698,11 @@ impl Externalities for LimitedExternalities { self.check(Capability::Http, "http_response_read_body"); self.externalities.http_response_read_body(request_id, buffer, deadline) } + + fn set_authorized_nodes(&mut self, nodes: Vec, authorized_only: bool) { + self.check(Capability::NodeAuthorization, "set_authorized_nodes"); + self.externalities.set_authorized_nodes(nodes, authorized_only) + } } #[cfg(feature = "std")] diff --git a/primitives/core/src/offchain/testing.rs b/primitives/core/src/offchain/testing.rs index c939c5cfccc14773e1ad4fe59026766da4d79d75..3fe34cc0cfa7b76cdc4b62c4c7279eca2118e650 100644 --- a/primitives/core/src/offchain/testing.rs +++ b/primitives/core/src/offchain/testing.rs @@ -24,6 +24,7 @@ use std::{ collections::{BTreeMap, VecDeque}, sync::Arc, }; +use crate::OpaquePeerId; use crate::offchain::{ self, storage::{InMemOffchainStorage, OffchainOverlayedChange, OffchainOverlayedChanges}, @@ -375,6 +376,10 @@ impl offchain::Externalities for TestOffchainExt { Err(HttpError::IoError) } } + + fn set_authorized_nodes(&mut self, _nodes: Vec, _authorized_only: bool) { + unimplemented!() + } } /// The internal state of the fake transaction pool. 
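To make the shape of the new node-authorization hook above concrete, a hedged sketch of a caller handing peers to the host (the peer-id bytes and the helper function are invented; only `OpaquePeerId` and `Externalities::set_authorized_nodes` come from this patch):

use sp_core::offchain::Externalities;
use sp_core::OpaquePeerId;

fn authorize_peers(ext: &mut dyn Externalities) {
    // Peer IDs are passed as opaque bytes; the host silently ignores ones it cannot decode.
    let nodes = vec![OpaquePeerId::new(b"example-peer-id-bytes".to_vec())];
    // `true` restricts connections to the authorized set only.
    ext.set_authorized_nodes(nodes, true);
}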
diff --git a/primitives/core/src/sr25519.rs b/primitives/core/src/sr25519.rs index b015347e9aa280d8189d8212c247699ee4be31f8..9a757c8900542a8e41b2e931e571fc0eefa53613 100644 --- a/primitives/core/src/sr25519.rs +++ b/primitives/core/src/sr25519.rs @@ -27,8 +27,8 @@ use sp_std::vec::Vec; use schnorrkel::{signing_context, ExpansionMode, Keypair, SecretKey, MiniSecretKey, PublicKey, derive::{Derivation, ChainCode, CHAIN_CODE_LENGTH} }; -#[cfg(feature = "full_crypto")] -use core::convert::TryFrom; +#[cfg(feature = "std")] +use std::convert::TryFrom; #[cfg(feature = "std")] use substrate_bip39::mini_secret_from_entropy; #[cfg(feature = "std")] diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml index dd9c58078753235ccb9caee462dc9b6d90006dd7..cc3fe7cd1b474525f82dfd1ef01791aa6fa0a695 100644 --- a/primitives/database/Cargo.toml +++ b/primitives/database/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-database" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate database trait." documentation = "https://docs.rs/sp-database" +readme = "README.md" [dependencies] parking_lot = "0.10.0" diff --git a/primitives/database/README.md b/primitives/database/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cd0677eb9eb44ef3d26f1cfecdb1bdbbd371536a --- /dev/null +++ b/primitives/database/README.md @@ -0,0 +1,3 @@ +The main database trait, allowing Substrate to store data persistently. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/debug-derive/Cargo.toml b/primitives/debug-derive/Cargo.toml index 38efaed1614b0a45de82a796ffe1f28061e455a0..10164553f857ce5a4dbacbb349e08aac87d5f954 100644 --- a/primitives/debug-derive/Cargo.toml +++ b/primitives/debug-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-debug-derive" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/externalities/Cargo.toml b/primitives/externalities/Cargo.toml index 62a2413f3355d2424461dc1194df8b21e36e0559..f7211779634d90a96bbfb3824e46b42df4a14241 100644 --- a/primitives/externalities/Cargo.toml +++ b/primitives/externalities/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-externalities" -version = "0.8.0-rc5" +version = "0.8.0" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -8,12 +8,22 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate externalities abstraction" documentation = "https://docs.rs/sp-externalities" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-storage = { version = "2.0.0-rc5", path = "../storage" } -sp-std = { version = "2.0.0-rc5", path = "../std" } -environmental = { version = "1.1.1" } -codec = { package = "parity-scale-codec", version = "1.3.1" } +sp-storage = { version = "2.0.0", path = "../storage", default-features = false } +sp-std = { version = "2.0.0", path = "../std", default-features = false } +environmental = { version = "1.1.1", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "environmental/std", + "sp-std/std", + "sp-storage/std", +] diff --git 
a/primitives/externalities/README.md b/primitives/externalities/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3141b2609e6375063dc42b089489451973393cba --- /dev/null +++ b/primitives/externalities/README.md @@ -0,0 +1,9 @@ +Substrate externalities abstraction + +The externalities mainly provide access to storage and to registered extensions. Extensions +are for example the keystore or the offchain externalities. These externalities are used to +access the node from the runtime via the runtime interfaces. + +This crate exposes the main [`Externalities`] trait. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/externalities/src/extensions.rs b/primitives/externalities/src/extensions.rs index 08d81e46c88fc92d0027ab513965a511d48b8316..d79f99d3344ead712c11f8460fd32cf06019134b 100644 --- a/primitives/externalities/src/extensions.rs +++ b/primitives/externalities/src/extensions.rs @@ -22,7 +22,9 @@ //! //! It is required that each extension implements the [`Extension`] trait. -use std::{collections::HashMap, collections::hash_map::Entry, any::{Any, TypeId}, ops::DerefMut}; +use sp_std::{ + collections::btree_map::{BTreeMap, Entry}, any::{Any, TypeId}, ops::DerefMut, boxed::Box, +}; use crate::Error; /// Marker trait for types that should be registered as [`Externalities`](crate::Externalities) extension. @@ -104,9 +106,10 @@ pub trait ExtensionStore { /// Stores extensions that should be made available through the externalities. #[derive(Default)] pub struct Extensions { - extensions: HashMap>, + extensions: BTreeMap>, } +#[cfg(feature = "std")] impl std::fmt::Debug for Extensions { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Extensions: ({})", self.extensions.len()) diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 8e141867195b743d99817fa464f930769af7fbf2..388482964f18c9a49fdeda1e5a60d5a1e44dc1a6 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +#![cfg_attr(not(feature = "std"), no_std)] + //! Substrate externalities abstraction //! //! The externalities mainly provide access to storage and to registered extensions. Extensions @@ -23,9 +25,9 @@ //! //! This crate exposes the main [`Externalities`] trait. -use std::any::{Any, TypeId}; +use sp_std::{any::{Any, TypeId}, vec::Vec, boxed::Box}; -use sp_storage::ChildInfo; +use sp_storage::{ChildInfo, TrackedStorageKey}; pub use scope_limited::{set_and_run_with_externalities, with_externalities}; pub use extensions::{Extension, Extensions, ExtensionStore}; @@ -248,12 +250,19 @@ pub trait Externalities: ExtensionStore { /// Resets read/write count for the benchmarking process. fn reset_read_write_count(&mut self); + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// Benchmarking related functionality and shouldn't be used anywhere else! + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// + /// Gets the current DB tracking whitelist. + fn get_whitelist(&self) -> Vec; + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! /// Benchmarking related functionality and shouldn't be used anywhere else! /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! /// /// Adds new storage keys to the DB tracking whitelist. 
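As a quick illustration of the `Extensions` container that the hunk above moves onto a `BTreeMap` for no_std builds, a sketch (the `CounterExt` extension is invented for the example; `decl_extension!`, `register` and `get_mut` are existing `sp-externalities` APIs):

use sp_externalities::Extensions;

sp_externalities::decl_extension! {
    /// An invented extension type carrying a counter.
    pub struct CounterExt(u64);
}

fn extensions_demo() {
    let mut exts = Extensions::default();
    exts.register(CounterExt(7));
    // Lookup is by `TypeId`; with this patch the storage behind it is a `BTreeMap`.
    assert!(exts.get_mut(core::any::TypeId::of::<CounterExt>()).is_some());
}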
- fn set_whitelist(&mut self, new: Vec>); + fn set_whitelist(&mut self, new: Vec); } /// Extension for the [`Externalities`] trait. diff --git a/primitives/finality-grandpa/Cargo.toml b/primitives/finality-grandpa/Cargo.toml index 8fc318df458fc9bdee97154ad9738cfa23ca7dcd..53548d378c4c86d58ea64c206fb2c9bfdd857abc 100644 --- a/primitives/finality-grandpa/Cargo.toml +++ b/primitives/finality-grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-finality-grandpa" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,21 +8,22 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Primitives for GRANDPA integration, suitable for WASM compilation." documentation = "https://docs.rs/sp-finality-grandpa" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../application-crypto" } +sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } grandpa = { package = "finality-grandpa", version = "0.12.3", default-features = false, features = ["derive-codec"] } log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../api" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-api = { version = "2.0.0", default-features = false, path = "../api" } +sp-core = { version = "2.0.0", default-features = false, path = "../core" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } [features] default = ["std"] diff --git a/primitives/finality-grandpa/README.md b/primitives/finality-grandpa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..77a7abca2eef3fa2da9b8c2580c1c0ae7f1d184b --- /dev/null +++ b/primitives/finality-grandpa/README.md @@ -0,0 +1,3 @@ +Primitives for GRANDPA integration, suitable for WASM compilation. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/finality-tracker/Cargo.toml b/primitives/finality-tracker/Cargo.toml index 67a3e836567c6c80ef24fa2325101c7a2fe9c168..756b6d91e296e467161774765f1581bf0b119c3b 100644 --- a/primitives/finality-tracker/Cargo.toml +++ b/primitives/finality-tracker/Cargo.toml @@ -1,20 +1,21 @@ [package] name = "sp-finality-tracker" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME module that tracks the last finalized block, as perceived by block authors." 
+readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } +sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } [features] default = ["std"] diff --git a/primitives/finality-tracker/README.md b/primitives/finality-tracker/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f9778e38a2bce2e6893af78090253765a54986fa --- /dev/null +++ b/primitives/finality-tracker/README.md @@ -0,0 +1,3 @@ +FRAME module that tracks the last finalized block, as perceived by block authors. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index fee8449a3b2e9f0894da56f6f15d1d55d5d760fb..10c66b73aec1ad66d3f0a92efad22994874efab8 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-inherents" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Provides types and traits for creating and checking inherents." documentation = "https://docs.rs/sp-inherents" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] parking_lot = { version = "0.10.0", optional = true } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-core = { version = "2.0.0", default-features = false, path = "../core" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } derive_more = { version = "0.99.2", optional = true } diff --git a/primitives/inherents/README.md b/primitives/inherents/README.md new file mode 100644 index 0000000000000000000000000000000000000000..78aa625fe85692b1d53164415a5b2086d5704186 --- /dev/null +++ b/primitives/inherents/README.md @@ -0,0 +1,17 @@ +Provides types and traits for creating and checking inherents. + +Each inherent is added to a produced block. Each runtime decides on which inherents it +wants to attach to its blocks. All data that is required for the runtime to create the inherents +is stored in the `InherentData`. This `InherentData` is constructed by the node and given to +the runtime. + +Types that provide data for inherents, should implement `InherentDataProvider` and need to be +registered at `InherentDataProviders`. + +In the runtime, modules need to implement `ProvideInherent` when they can create and/or check +inherents. By implementing `ProvideInherent`, a module is not enforced to create an inherent. +A module can also just check given inherents. For using a module as inherent provider, it needs +to be registered by the `construct_runtime!` macro. The macro documentation gives more +information on how that is done. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index 04ee4efd97a240c4a100ec8a8d4c30a04808f11a..70a78f99d5dae5d888d6396f2ec0d4aa5caa0fe9 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-io" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "I/O for Substrate runtimes" documentation = "https://docs.rs/sp-io" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,18 +17,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } hash-db = { version = "0.15.2", default-features = false } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-core = { version = "2.0.0", default-features = false, path = "../core" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } libsecp256k1 = { version = "0.3.4", optional = true } -sp-state-machine = { version = "0.8.0-rc5", optional = true, path = "../../primitives/state-machine" } -sp-wasm-interface = { version = "2.0.0-rc5", path = "../../primitives/wasm-interface", default-features = false } -sp-runtime-interface = { version = "2.0.0-rc5", default-features = false, path = "../runtime-interface" } -sp-trie = { version = "2.0.0-rc5", optional = true, path = "../../primitives/trie" } -sp-externalities = { version = "0.8.0-rc5", optional = true, path = "../externalities" } -sp-tracing = { version = "2.0.0-rc5", default-features = false, path = "../tracing" } +sp-state-machine = { version = "0.8.0", optional = true, path = "../../primitives/state-machine" } +sp-wasm-interface = { version = "2.0.0", path = "../../primitives/wasm-interface", default-features = false } +sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../runtime-interface" } +sp-trie = { version = "2.0.0", optional = true, path = "../../primitives/trie" } +sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } +sp-tracing = { version = "2.0.0", default-features = false, path = "../tracing" } log = { version = "0.4.8", optional = true } futures = { version = "0.3.1", features = ["thread-pool"], optional = true } parking_lot = { version = "0.10.0", optional = true } +tracing = { version = "0.1.19", default-features = false } +tracing-core = { version = "0.1.15", default-features = false} [features] default = ["std"] @@ -42,11 +45,18 @@ std = [ "sp-runtime-interface/std", "sp-externalities", "sp-wasm-interface/std", + "sp-tracing/std", + "tracing/std", + "tracing-core/std", "log", "futures", "parking_lot", ] +with-tracing = [ + "sp-tracing/with-tracing" +] + # These two features are used for `no_std` builds for the environments which already provides # `#[panic_handler]`, `#[alloc_error_handler]` and `#[global_allocator]`. # diff --git a/primitives/io/README.md b/primitives/io/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a24370cc566b359b2b3aeab195d185dec34f7e78 --- /dev/null +++ b/primitives/io/README.md @@ -0,0 +1,3 @@ +I/O host interface for substrate runtime. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 59d1c4f37ef275cd510d015e8911c0a9f8f32632..2e7cb3e7efad936941412b80c3a4393f3233c6f5 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -32,6 +32,9 @@ use sp_std::vec::Vec; #[cfg(feature = "std")] use sp_std::ops::Deref; +#[cfg(feature = "std")] +use tracing; + #[cfg(feature = "std")] use sp_core::{ crypto::Pair, @@ -42,7 +45,7 @@ use sp_core::{ }; use sp_core::{ - crypto::KeyTypeId, ed25519, sr25519, ecdsa, H256, LogLevel, + OpaquePeerId, crypto::KeyTypeId, ed25519, sr25519, ecdsa, H256, LogLevel, offchain::{ Timestamp, HttpRequestId, HttpRequestStatus, HttpError, StorageKind, OpaqueNetworkState, }, @@ -52,6 +55,7 @@ use sp_core::{ use sp_trie::{TrieConfiguration, trie_types::Layout}; use sp_runtime_interface::{runtime_interface, Pointer}; +use sp_runtime_interface::pass_by::PassBy; use codec::{Encode, Decode}; @@ -94,7 +98,7 @@ pub trait Storage { let data = &value[value_offset.min(value.len())..]; let written = std::cmp::min(data.len(), value_out.len()); value_out[..written].copy_from_slice(&data[..written]); - value.len() as u32 + data.len() as u32 }) } @@ -235,7 +239,7 @@ pub trait DefaultChildStorage { let data = &value[value_offset.min(value.len())..]; let written = std::cmp::min(data.len(), value_out.len()); value_out[..written].copy_from_slice(&data[..written]); - value.len() as u32 + data.len() as u32 }) } @@ -960,6 +964,13 @@ pub trait Offchain { .http_response_read_body(request_id, buffer, deadline) .map(|r| r as u32) } + + /// Set the authorized nodes and authorized_only flag. + fn set_authorized_nodes(&mut self, nodes: Vec, authorized_only: bool) { + self.extension::() + .expect("set_authorized_nodes can be called only in the offchain worker context") + .set_authorized_nodes(nodes, authorized_only) + } } /// Wasm only interface that provides functions for calling into the allocator. @@ -997,55 +1008,156 @@ pub trait Logging { } } -#[cfg(feature = "std")] -sp_externalities::decl_extension! { - /// Extension to allow running traces in wasm via Proxy - pub struct TracingProxyExt(sp_tracing::proxy::TracingProxy); +#[derive(Encode, Decode)] +/// Crossing is a helper wrapping any Encode-Decodeable type +/// for transferring over the wasm barrier. +pub struct Crossing(T); + +impl PassBy for Crossing { + type PassBy = sp_runtime_interface::pass_by::Codec; } -/// Interface that provides functions for profiling the runtime. -#[runtime_interface] +impl Crossing { + + /// Convert into the inner type + pub fn into_inner(self) -> T { + self.0 + } +} + +// useful for testing +impl core::default::Default for Crossing + where T: core::default::Default + Encode + Decode +{ + fn default() -> Self { + Self(Default::default()) + } + +} + +/// Interface to provide tracing facilities for wasm. Modelled after tokios `tracing`-crate +/// interfaces. See `sp-tracing` for more information. 
+#[runtime_interface(wasm_only, no_tracing)] pub trait WasmTracing { - /// To create and enter a `tracing` span, using `sp_tracing::proxy` - /// Returns 0 value to indicate that no further traces should be attempted - fn enter_span(&mut self, target: &str, name: &str) -> u64 { - if sp_tracing::wasm_tracing_enabled() { - match self.extension::() { - Some(proxy) => return proxy.enter_span(target, name), - None => { - if self.register_extension(TracingProxyExt(sp_tracing::proxy::TracingProxy::new())).is_ok() { - if let Some(proxy) = self.extension::() { - return proxy.enter_span(target, name); - } - } else { - log::warn!( - target: "tracing", - "Unable to register extension: TracingProxyExt" - ); - } - } + /// Whether the span described in `WasmMetadata` should be traced wasm-side + /// On the host converts into a static Metadata and checks against the global `tracing` dispatcher. + /// + /// When returning false the calling code should skip any tracing-related execution. In general + /// within the same block execution this is not expected to change and it doesn't have to be + /// checked more than once per metadata. This exists for optimisation purposes but is still not + /// cheap as it will jump the wasm-native-barrier every time it is called. So an implementation might + /// chose to cache the result for the execution of the entire block. + fn enabled(&mut self, metadata: Crossing) -> bool { + let metadata: &tracing_core::metadata::Metadata<'static> = (&metadata.into_inner()).into(); + tracing::dispatcher::get_default(|d| { + d.enabled(metadata) + }) + } + + /// Open a new span with the given attributes. Return the u64 Id of the span. + /// + /// On the native side this goes through the default `tracing` dispatcher to register the span + /// and then calls `clone_span` with the ID to signal that we are keeping it around on the wasm- + /// side even after the local span is dropped. The resulting ID is then handed over to the wasm- + /// side. + fn enter_span(&mut self, span: Crossing) -> u64 { + let span: tracing::Span = span.into_inner().into(); + match span.id() { + Some(id) => tracing::dispatcher::get_default(|d| { + // inform dispatch that we'll keep the ID around + // then enter it immediately + let final_id = d.clone_span(&id); + d.enter(&final_id); + final_id.into_u64() + }), + _ => { + 0 } } - log::debug!( - target: "tracing", - "Notify to runtime that tracing is disabled." - ); - 0 - } - - /// Exit a `tracing` span, using `sp_tracing::proxy` - fn exit_span(&mut self, id: u64) { - if let Some(proxy) = self.extension::() { - proxy.exit_span(id) - } else { - log::warn!( - target: "tracing", - "Unable to load extension: TracingProxyExt" - ); + } + + /// Emit the given event to the global tracer on the native side + fn event(&mut self, event: Crossing) { + event.into_inner().emit(); + } + + /// Signal that a given span-id has been exited. On native, this directly + /// proxies the span to the global dispatcher. 
+ fn exit(&mut self, span: u64) { + tracing::dispatcher::get_default(|d| { + let id = tracing_core::span::Id::from_u64(span); + d.exit(&id); + }); + } +} + +#[cfg(all(not(feature="std"), feature="with-tracing"))] +mod tracing_setup { + use core::sync::atomic::{AtomicBool, Ordering}; + use tracing_core::{ + dispatcher::{Dispatch, set_global_default}, + span::{Id, Record, Attributes}, + Metadata, Event, + }; + use super::{wasm_tracing, Crossing}; + + const TRACING_SET : AtomicBool = AtomicBool::new(false); + + + /// The PassingTracingSubscriber implements `tracing_core::Subscriber` + /// and pushes the information across the runtime interface to the host + struct PassingTracingSubsciber; + + impl tracing_core::Subscriber for PassingTracingSubsciber { + fn enabled(&self, metadata: &Metadata<'_>) -> bool { + wasm_tracing::enabled(Crossing(metadata.into())) + } + fn new_span(&self, attrs: &Attributes<'_>) -> Id { + Id::from_u64(wasm_tracing::enter_span(Crossing(attrs.into()))) + } + fn enter(&self, span: &Id) { + // Do nothing, we already entered the span previously + } + /// Not implemented! We do not support recording values later + /// Will panic when used. + fn record(&self, span: &Id, values: &Record<'_>) { + unimplemented!{} // this usage is not supported + } + /// Not implemented! We do not support recording values later + /// Will panic when used. + fn record_follows_from(&self, span: &Id, follows: &Id) { + unimplemented!{ } // this usage is not supported + } + fn event(&self, event: &Event<'_>) { + wasm_tracing::event(Crossing(event.into())) + } + fn exit(&self, span: &Id) { + wasm_tracing::exit(span.into_u64()) + } + } + + + /// Initialize tracing of sp_tracing on wasm with `with-tracing` enabled. + /// Can be called multiple times from within the same process and will only + /// set the global bridging subscriber once. + pub fn init_tracing() { + if TRACING_SET.load(Ordering::Relaxed) == false { + set_global_default(Dispatch::new(PassingTracingSubsciber {})) + .expect("We only ever call this once"); + TRACING_SET.store(true, Ordering::Relaxed); } } } +#[cfg(not(all(not(feature="std"), feature="with-tracing")))] +mod tracing_setup { + /// Initialize tracing of sp_tracing not necessary – noop. To enable build + /// without std and with the `with-tracing`-feature. + pub fn init_tracing() { } +} + +pub use tracing_setup::init_tracing; + /// Wasm-only interface that provides functions for interacting with the sandbox. 
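Taken together, `init_tracing()` and the `WasmTracing` interface above mean that ordinary `tracing` spans and events emitted inside the runtime are forwarded to the host's global dispatcher once the bridging subscriber is installed. A rough sketch of what this looks like from runtime code, using the `sp_io::init_tracing` export and the `sp_tracing` macros that appear elsewhere in this diff (an illustration, not a drop-in snippet):

```rust
// Sketch of runtime code using the wasm-side tracing bridge. The names follow this
// diff (`sp_io::init_tracing`, `sp_tracing::within_span!`, `sp_tracing::trace_span!`).
fn do_some_runtime_work() {
    // Installs the bridging subscriber as the global dispatcher once; later calls are no-ops.
    sp_io::init_tracing();

    // Everything in the body runs inside the span. On the wasm side the span travels
    // through `wasm_tracing::enter_span`/`exit`; natively it goes straight to the
    // global `tracing` dispatcher.
    sp_tracing::within_span! { sp_tracing::trace_span!("do_some_runtime_work");
        let _answer = 2u32 + 2;
    }
}
```

Because `enabled()` crosses the wasm-native barrier on every call, callers are expected to cache its answer per metadata for the duration of a block rather than re-checking before each event.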
#[runtime_interface(wasm_only)] pub trait Sandbox { @@ -1236,17 +1348,18 @@ mod tests { #[test] fn read_storage_works() { + let value = b"\x0b\0\0\0Hello world".to_vec(); let mut t = BasicExternalities::new(Storage { - top: map![b":test".to_vec() => b"\x0b\0\0\0Hello world".to_vec()], + top: map![b":test".to_vec() => value.clone()], children_default: map![], }); t.execute_with(|| { let mut v = [0u8; 4]; - assert!(storage::read(b":test", &mut v[..], 0).unwrap() >= 4); + assert_eq!(storage::read(b":test", &mut v[..], 0).unwrap(), value.len() as u32); assert_eq!(v, [11u8, 0, 0, 0]); let mut w = [0u8; 11]; - assert!(storage::read(b":test", &mut w[..], 4).unwrap() >= 11); + assert_eq!(storage::read(b":test", &mut w[..], 4).unwrap(), value.len() as u32 - 4); assert_eq!(&w, b"Hello world"); }); } diff --git a/primitives/keyring/Cargo.toml b/primitives/keyring/Cargo.toml index 05ca468133296bc72f68e1b9d00a5a138dfb4691..be4db5834458ea166cf3481b1cdceea4fc57b8da 100644 --- a/primitives/keyring/Cargo.toml +++ b/primitives/keyring/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-keyring" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,13 +8,14 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Keyring support code for the runtime. A set of test accounts." documentation = "https://docs.rs/sp-keyring" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0-rc5", path = "../core" } -sp-runtime = { version = "2.0.0-rc5", path = "../runtime" } +sp-core = { version = "2.0.0", path = "../core" } +sp-runtime = { version = "2.0.0", path = "../runtime" } lazy_static = "1.4.0" strum = { version = "0.16.0", features = ["derive"] } diff --git a/primitives/keyring/README.md b/primitives/keyring/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1610f237df97a8a96f5017f0c15c686661de36e4 --- /dev/null +++ b/primitives/keyring/README.md @@ -0,0 +1,3 @@ +Support code for the runtime. A set of test accounts. 
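The updated `read_storage_works` test above pins down the new `storage::read` contract: the returned length is the number of bytes still available from `value_offset`, while at most `value_out.len()` bytes are actually copied. The arithmetic in isolation, with plain slices (illustrative only):

```rust
// Illustrates the return-value semantics after the fix: the function reports how many
// bytes remain from `offset`, while copying at most `out.len()` of them.
fn read_with_offset(value: &[u8], out: &mut [u8], offset: usize) -> u32 {
    let data = &value[offset.min(value.len())..];
    let written = data.len().min(out.len());
    out[..written].copy_from_slice(&data[..written]);
    data.len() as u32 // previously this returned value.len() as u32
}

fn main() {
    let stored = b"\x0b\0\0\0Hello world".to_vec(); // 15 bytes in total
    let mut buf = [0u8; 4];
    assert_eq!(read_with_offset(&stored, &mut buf, 0), 15); // whole value remains
    let mut buf = [0u8; 11];
    assert_eq!(read_with_offset(&stored, &mut buf, 4), 11); // 15 - 4 bytes remain
    assert_eq!(&buf, b"Hello world");
}
```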
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/npos-elections/Cargo.toml b/primitives/npos-elections/Cargo.toml index ff843865d67279e57832248a8d47e200f551fb09..4a66743028d199108193e24fd6d055abfa38acde 100644 --- a/primitives/npos-elections/Cargo.toml +++ b/primitives/npos-elections/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "sp-npos-elections" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "NPoS election algorithm primitives" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -14,14 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-npos-elections-compact = { version = "2.0.0-rc5", path = "./compact" } -sp-arithmetic = { version = "2.0.0-rc5", default-features = false, path = "../arithmetic" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-npos-elections-compact = { version = "2.0.0", path = "./compact" } +sp-arithmetic = { version = "2.0.0", default-features = false, path = "../arithmetic" } [dev-dependencies] -substrate-test-utils = { version = "2.0.0-rc5", path = "../../test-utils" } +substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } rand = "0.7.3" -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } [features] default = ["std"] diff --git a/primitives/npos-elections/README.md b/primitives/npos-elections/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a98351a6d89a715c62709dfd9188a2ddf55fee62 --- /dev/null +++ b/primitives/npos-elections/README.md @@ -0,0 +1,12 @@ +A set of election algorithms to be used with a substrate runtime, typically within the staking +sub-system. Notable implementation include + +- [`seq_phragmen`]: Implements the Phragmén Sequential Method. An un-ranked, relatively fast + election method that ensures PJR, but does not provide a constant factor approximation of the + maximin problem. +- [`balance_solution`]: Implements the star balancing algorithm. This iterative process can + increase a solutions score, as described in [`evaluate_support`]. 
+ +More information can be found at: https://arxiv.org/abs/2004.12990 + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/npos-elections/compact/Cargo.toml b/primitives/npos-elections/compact/Cargo.toml index 4abe79b77f04928775e30a6d55e58474d9ce56a3..1873f8fa160570e4c3378f3e6681300cbf33ab46 100644 --- a/primitives/npos-elections/compact/Cargo.toml +++ b/primitives/npos-elections/compact/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-npos-elections-compact" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/npos-elections/compact/src/lib.rs b/primitives/npos-elections/compact/src/lib.rs index 03526d17981f94ffde04d121140cecfaae53d9f6..134f3f59ff177bb03e37b0afae984de463b46e70 100644 --- a/primitives/npos-elections/compact/src/lib.rs +++ b/primitives/npos-elections/compact/src/lib.rs @@ -149,22 +149,9 @@ fn struct_def( ) }).collect::(); - - let len_impl = (1..=count).map(|c| { - let field_name = field_name_for(c); - quote!( - all_len = all_len.saturating_add(self.#field_name.len()); - ) - }).collect::(); - - let edge_count_impl = (1..count).map(|c| { - let field_name = field_name_for(c); - quote!( - all_edges = all_edges.saturating_add( - self.#field_name.len().saturating_mul(#c as usize) - ); - ) - }).collect::(); + let len_impl = len_impl(count); + let edge_count_impl = edge_count_impl(count); + let unique_targets_impl = unique_targets_impl(count); let derives_and_maybe_compact_encoding = if compact_encoding { // custom compact encoding. @@ -209,6 +196,26 @@ fn struct_def( all_edges } + /// Get the number of unique targets in the whole struct. + /// + /// Once presented with a list of winners, this set and the set of winners must be + /// equal. + /// + /// The resulting indices are sorted. + pub fn unique_targets(&self) -> Vec<#target_type> { + let mut all_targets: Vec<#target_type> = Vec::with_capacity(self.average_edge_count()); + let mut maybe_insert_target = |t: #target_type| { + match all_targets.binary_search(&t) { + Ok(_) => (), + Err(pos) => all_targets.insert(pos, t) + } + }; + + #unique_targets_impl + + all_targets + } + /// Get the average edge count. pub fn average_edge_count(&self) -> usize { self.edge_count().checked_div(self.len()).unwrap_or(0) @@ -217,6 +224,65 @@ fn struct_def( )) } +fn len_impl(count: usize) -> TokenStream2 { + (1..=count).map(|c| { + let field_name = field_name_for(c); + quote!( + all_len = all_len.saturating_add(self.#field_name.len()); + ) + }).collect::() +} + +fn edge_count_impl(count: usize) -> TokenStream2 { + (1..=count).map(|c| { + let field_name = field_name_for(c); + quote!( + all_edges = all_edges.saturating_add( + self.#field_name.len().saturating_mul(#c as usize) + ); + ) + }).collect::() +} + +fn unique_targets_impl(count: usize) -> TokenStream2 { + let unique_targets_impl_single = { + let field_name = field_name_for(1); + quote! { + self.#field_name.iter().for_each(|(_, t)| { + maybe_insert_target(*t); + }); + } + }; + + let unique_targets_impl_double = { + let field_name = field_name_for(2); + quote! { + self.#field_name.iter().for_each(|(_, (t1, _), t2)| { + maybe_insert_target(*t1); + maybe_insert_target(*t2); + }); + } + }; + + let unique_targets_impl_rest = (3..=count).map(|c| { + let field_name = field_name_for(c); + quote! { + self.#field_name.iter().for_each(|(_, inners, t_last)| { + inners.iter().for_each(|(t, _)| { + maybe_insert_target(*t); + }); + maybe_insert_target(*t_last); + }); + } + }).collect::(); + + quote! 
{ + #unique_targets_impl_single + #unique_targets_impl_double + #unique_targets_impl_rest + } +} + fn imports() -> Result { if std::env::var("CARGO_PKG_NAME").unwrap() == "sp-npos-elections" { Ok(quote! { diff --git a/primitives/npos-elections/fuzzer/Cargo.toml b/primitives/npos-elections/fuzzer/Cargo.toml index 7969a68c6bcad43a0cec35cc47380c815418e37e..32bdd9853997f4010667ea23faf33c10c205c313 100644 --- a/primitives/npos-elections/fuzzer/Cargo.toml +++ b/primitives/npos-elections/fuzzer/Cargo.toml @@ -14,11 +14,12 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-npos-elections = { version = "2.0.0-rc5", path = ".." } -sp-std = { version = "2.0.0-rc5", path = "../../std" } -sp-runtime = { version = "2.0.0-rc5", path = "../../runtime" } +sp-npos-elections = { version = "2.0.0", path = ".." } +sp-std = { version = "2.0.0", path = "../../std" } +sp-runtime = { version = "2.0.0", path = "../../runtime" } honggfuzz = "0.5" rand = { version = "0.7.3", features = ["std", "small_rng"] } +codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } [[bin]] name = "reduce" @@ -27,3 +28,7 @@ path = "src/reduce.rs" [[bin]] name = "balance_solution" path = "src/balance_solution.rs" + +[[bin]] +name = "compact" +path = "src/compact.rs" diff --git a/primitives/npos-elections/fuzzer/src/compact.rs b/primitives/npos-elections/fuzzer/src/compact.rs new file mode 100644 index 0000000000000000000000000000000000000000..91f734bb5b7cb69ea87ca78f3d1c369f4921d792 --- /dev/null +++ b/primitives/npos-elections/fuzzer/src/compact.rs @@ -0,0 +1,34 @@ +use honggfuzz::fuzz; +use sp_npos_elections::generate_solution_type; +use sp_npos_elections::sp_arithmetic::Percent; +use sp_runtime::codec::{Encode, Error}; + +fn main() { + generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::(16)); + loop { + fuzz!(|fuzzer_data: &[u8]| { + let result_decoded: Result = + ::decode(&mut &fuzzer_data[..]); + // Ignore errors as not every random sequence of bytes can be decoded as InnerTestSolutionCompact + if let Ok(decoded) = result_decoded { + // Decoding works, let's re-encode it and compare results. + let reencoded: std::vec::Vec = decoded.encode(); + // The reencoded value may or may not be equal to the original fuzzer output. However, the + // original decoder should be optimal (in the sense that there is no shorter encoding of + // the same object). 
So let's see if the fuzzer can find something shorter: + if fuzzer_data.len() < reencoded.len() { + panic!("fuzzer_data.len() < reencoded.len()"); + } + // The reencoded value should definitely be decodable (if unwrap() fails that is a valid + // panic/finding for the fuzzer): + let decoded2: InnerTestSolutionCompact = + ::decode( + &mut reencoded.as_slice(), + ).unwrap(); + // And it should be equal to the original decoded object (resulting from directly + // decoding fuzzer_data): + assert_eq!(decoded, decoded2); + } + }); + } +} diff --git a/primitives/npos-elections/src/reduce.rs b/primitives/npos-elections/src/reduce.rs index 6d458a5fffb3864d97ec5843a4a2e5f7894a2992..17d7dd1290f7d5e122799231ec354a93bf066438 100644 --- a/primitives/npos-elections/src/reduce.rs +++ b/primitives/npos-elections/src/reduce.rs @@ -52,6 +52,7 @@ use crate::{ExtendedBalance, IdentifierT, StakedAssignment}; use sp_arithmetic::traits::{Bounded, Zero}; use sp_std::{ collections::btree_map::{BTreeMap, Entry::*}, + vec, prelude::*, }; diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index 8e99d2222e885ca67fdb39973a676b1209c698cc..d1769acd08144f30ecac0cd8e43b1b69b5fc35fe 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -1002,6 +1002,7 @@ mod solution_type { ); assert_eq!(compact.len(), 4); assert_eq!(compact.edge_count(), 2 + 4); + assert_eq!(compact.unique_targets(), vec![10, 11, 20, 40, 50, 51]); } #[test] @@ -1097,6 +1098,11 @@ mod solution_type { } ); + assert_eq!( + compacted.unique_targets(), + vec![0, 1, 2, 3, 4, 5, 6, 7, 8], + ); + let voter_at = |a: u32| -> Option { voters.get(>::try_into(a).unwrap()).cloned() }; @@ -1110,6 +1116,69 @@ mod solution_type { ); } + #[test] + fn unique_targets_len_edge_count_works() { + const ACC: TestAccuracy = TestAccuracy::from_percent(10); + + // we don't really care about voters here so all duplicates. This is not invalid per se. + let compact = TestSolutionCompact { + votes1: vec![(99, 1), (99, 2)], + votes2: vec![ + (99, (3, ACC.clone()), 7), + (99, (4, ACC.clone()), 8), + ], + votes3: vec![ + (99, [(11, ACC.clone()), (12, ACC.clone())], 13), + ], + // ensure the last one is also counted. + votes16: vec![ + ( + 99, + [ + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + ], + 67, + ) + ], + ..Default::default() + }; + + assert_eq!(compact.unique_targets(), vec![1, 2, 3, 4, 7, 8, 11, 12, 13, 66, 67]); + assert_eq!(compact.edge_count(), 2 + (2 * 2) + 3 + 16); + assert_eq!(compact.len(), 6); + + // this one has some duplicates. 
+ let compact = TestSolutionCompact { + votes1: vec![(99, 1), (99, 1)], + votes2: vec![ + (99, (3, ACC.clone()), 7), + (99, (4, ACC.clone()), 8), + ], + votes3: vec![ + (99, [(11, ACC.clone()), (11, ACC.clone())], 13), + ], + ..Default::default() + }; + + assert_eq!(compact.unique_targets(), vec![1, 3, 4, 7, 8, 11, 13]); + assert_eq!(compact.edge_count(), 2 + (2 * 2) + 3); + assert_eq!(compact.len(), 5); + } + #[test] fn compact_into_assignment_must_report_overflow() { // in votes2 @@ -1165,7 +1234,7 @@ mod solution_type { assert_eq!(compacted.unwrap_err(), PhragmenError::CompactTargetOverflow); } - #[test] + #[test] fn zero_target_count_is_ignored() { let voters = vec![1 as AccountId, 2]; let targets = vec![10 as AccountId, 11]; diff --git a/primitives/offchain/Cargo.toml b/primitives/offchain/Cargo.toml index a2738ab26e134b65b3b0ae87cc39e4452c30fb00..02041d5c678ef852487f08abdefbeda9bceea88b 100644 --- a/primitives/offchain/Cargo.toml +++ b/primitives/offchain/Cargo.toml @@ -1,23 +1,24 @@ [package] description = "Substrate offchain workers primitives" name = "sp-offchain" -version = "2.0.0-rc5" +version = "2.0.0" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } +sp-core = { version = "2.0.0", default-features = false, path = "../core" } +sp-api = { version = "2.0.0", default-features = false, path = "../api" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } [dev-dependencies] -sp-state-machine = { version = "0.8.0-rc5", default-features = false, path = "../state-machine" } +sp-state-machine = { version = "0.8.0", default-features = false, path = "../state-machine" } [features] default = ["std"] diff --git a/primitives/offchain/README.md b/primitives/offchain/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a8620d3bb9d5b2a6d45e27404891cece853eafc8 --- /dev/null +++ b/primitives/offchain/README.md @@ -0,0 +1,3 @@ +The Offchain Worker runtime api primitives. 
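The new `compact` fuzzer earlier in this diff checks a decode / re-encode / decode round trip: any input that decodes must re-encode to something no longer than the input, and the re-encoding must decode back to an equal value. The same property, spelled out for an ordinary SCALE type outside the honggfuzz harness (a sketch using `Vec<u32>` and `parity-scale-codec`):

```rust
// Round-trip property in isolation: if `data` decodes, then re-encoding must not be
// longer than `data`, and must decode to the same value again.
use codec::{Decode, Encode};

fn check_roundtrip(data: &[u8]) {
    if let Ok(decoded) = <Vec<u32>>::decode(&mut &data[..]) {
        let reencoded = decoded.encode();
        assert!(reencoded.len() <= data.len(), "re-encoding must not grow");
        let decoded2 = <Vec<u32>>::decode(&mut reencoded.as_slice())
            .expect("re-encoded bytes must decode");
        assert_eq!(decoded, decoded2);
    }
}

fn main() {
    // A valid encoding round-trips...
    check_roundtrip(&vec![1u32, 2, 3].encode());
    // ...and bytes that fail to decode are simply ignored.
    check_roundtrip(&[0xff, 0xff, 0xff]);
}
```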
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/panic-handler/Cargo.toml b/primitives/panic-handler/Cargo.toml index b778c1106b885fbac377b9273dfe50ced2669bf1..acf454b960a7c861924c4d706582a53d0d71b1a0 100644 --- a/primitives/panic-handler/Cargo.toml +++ b/primitives/panic-handler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-panic-handler" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Custom panic hook with bug report link" documentation = "https://docs.rs/sp-panic-handler" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/panic-handler/README.md b/primitives/panic-handler/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c08396960f4c953abda24049359cbb6649dbc7d2 --- /dev/null +++ b/primitives/panic-handler/README.md @@ -0,0 +1,10 @@ +Custom panic hook with bug report link + +This crate provides the [`set`] function, which wraps around [`std::panic::set_hook`] and +sets up a panic hook that prints a backtrace and invites the user to open an issue to the +given URL. + +By default, the panic handler aborts the process by calling [`std::process::exit`]. This can +temporarily be disabled by using an [`AbortGuard`]. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/rpc/Cargo.toml b/primitives/rpc/Cargo.toml index c2211f6fb915668c75b683a0ef996e9b10cf615b..0c9fe8ebd666798a8bac921a3e9b12cd650c48f8 100644 --- a/primitives/rpc/Cargo.toml +++ b/primitives/rpc/Cargo.toml @@ -1,19 +1,20 @@ [package] name = "sp-rpc" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate RPC primitives and utilities." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", features = ["derive"] } -sp-core = { version = "2.0.0-rc5", path = "../core" } +sp-core = { version = "2.0.0", path = "../core" } [dev-dependencies] serde_json = "1.0.41" diff --git a/primitives/rpc/README.md b/primitives/rpc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8a9c17edd4755be7c9b040bde1019ea85fabb1be --- /dev/null +++ b/primitives/rpc/README.md @@ -0,0 +1,3 @@ +Substrate RPC primitives and utilities. 
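The sp-panic-handler README above describes a hook that prints a backtrace, points the reporter at an issue URL and then aborts, unless an `AbortGuard` temporarily re-enables unwinding. A hedged sketch of typical wiring; the exact signatures of `set` and `AbortGuard::force_unwind` here are assumptions based on that description:

```rust
// Hedged sketch of wiring up sp-panic-handler in a node binary. `set` and `AbortGuard`
// come from the README above; the precise signatures are assumed, not verified here.
fn main() {
    // Install the hook once, early in `main`.
    sp_panic_handler::set("https://github.com/paritytech/substrate/issues/new", "2.0.0");

    // Within this scope a panic unwinds normally instead of aborting the process,
    // so it can be caught by `std::panic::catch_unwind`.
    {
        let _guard = sp_panic_handler::AbortGuard::force_unwind();
        let _ = std::panic::catch_unwind(|| {
            // potentially panicking code
        });
    }

    // Outside the guard, a panic prints the report and aborts as described above.
}
```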
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index 16d5a14e889b154b1bbadd29ee7708342f03e534..bc36098f05a54854591562980e297f8b2e8f32d3 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,25 +8,27 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate runtime interface" documentation = "https://docs.rs/sp-runtime-interface/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-wasm-interface = { version = "2.0.0-rc5", path = "../wasm-interface", default-features = false } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-tracing = { version = "2.0.0-rc5", default-features = false, path = "../tracing" } -sp-runtime-interface-proc-macro = { version = "2.0.0-rc5", path = "proc-macro" } -sp-externalities = { version = "0.8.0-rc5", optional = true, path = "../externalities" } +sp-wasm-interface = { version = "2.0.0", path = "../wasm-interface", default-features = false } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-tracing = { version = "2.0.0", default-features = false, path = "../tracing" } +sp-runtime-interface-proc-macro = { version = "2.0.0", path = "proc-macro" } +sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } static_assertions = "1.0.0" primitive-types = { version = "0.7.0", default-features = false } +sp-storage = { version = "2.0.0", default-features = false, path = "../storage" } [dev-dependencies] -sp-runtime-interface-test-wasm = { version = "2.0.0-rc5", path = "test-wasm" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } -sp-core = { version = "2.0.0-rc5", path = "../core" } -sp-io = { version = "2.0.0-rc5", path = "../io" } +sp-runtime-interface-test-wasm = { version = "2.0.0", path = "test-wasm" } +sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } +sp-core = { version = "2.0.0", path = "../core" } +sp-io = { version = "2.0.0", path = "../io" } rustversion = "1.0.0" trybuild = "1.0.23" diff --git a/primitives/runtime-interface/README.md b/primitives/runtime-interface/README.md new file mode 100644 index 0000000000000000000000000000000000000000..666bfe4d5a86117a2719f2225c134d1f47b9c844 --- /dev/null +++ b/primitives/runtime-interface/README.md @@ -0,0 +1,88 @@ +Substrate runtime interface + +This crate provides types, traits and macros around runtime interfaces. A runtime interface is +a fixed interface between a Substrate runtime and a Substrate node. For a native runtime the +interface maps to a direct function call of the implementation. For a wasm runtime the interface +maps to an external function call. These external functions are exported by the wasm executor +and they map to the same implementation as the native calls. + +# Using a type in a runtime interface + +Any type that should be used in a runtime interface as argument or return value needs to +implement [`RIType`]. 
The associated type [`FFIType`](RIType::FFIType) is the type that is used +in the FFI function to represent the actual type. For example `[T]` is represented by an `u64`. +The slice pointer and the length will be mapped to an `u64` value. For more information see +this [table](#ffi-type-and-conversion). The FFI function definition is used when calling from +the wasm runtime into the node. + +Traits are used to convert from a type to the corresponding [`RIType::FFIType`]. +Depending on where and how a type should be used in a function signature, a combination of the +following traits need to be implemented: + +1. Pass as function argument: [`wasm::IntoFFIValue`] and [`host::FromFFIValue`] +2. As function return value: [`wasm::FromFFIValue`] and [`host::IntoFFIValue`] +3. Pass as mutable function argument: [`host::IntoPreallocatedFFIValue`] + +The traits are implemented for most of the common types like `[T]`, `Vec`, arrays and +primitive types. + +For custom types, we provide the [`PassBy`](pass_by::PassBy) trait and strategies that define +how a type is passed between the wasm runtime and the node. Each strategy also provides a derive +macro to simplify the implementation. + +# Performance + +To not waste any more performance when calling into the node, not all types are SCALE encoded +when being passed as arguments between the wasm runtime and the node. For most types that +are raw bytes like `Vec`, `[u8]` or `[u8; N]` we pass them directly, without SCALE encoding +them in front of. The implementation of [`RIType`] each type provides more information on how +the data is passed. + +# Declaring a runtime interface + +Declaring a runtime interface is similar to declaring a trait in Rust: + +```rust +#[sp_runtime_interface::runtime_interface] +trait RuntimeInterface { + fn some_function(value: &[u8]) -> bool { + value.iter().all(|v| *v > 125) + } +} +``` + +For more information on declaring a runtime interface, see +[`#[runtime_interface]`](attr.runtime_interface.html). + +# FFI type and conversion + +The following table documents how values of types are passed between the wasm and +the host side and how they are converted into the corresponding type. + +| Type | FFI type | Conversion | +|----|----|----| +| `u8` | `u8` | `Identity` | +| `u16` | `u16` | `Identity` | +| `u32` | `u32` | `Identity` | +| `u64` | `u64` | `Identity` | +| `i128` | `u32` | `v.as_ptr()` (pointer to a 16 byte array) | +| `i8` | `i8` | `Identity` | +| `i16` | `i16` | `Identity` | +| `i32` | `i32` | `Identity` | +| `i64` | `i64` | `Identity` | +| `u128` | `u32` | `v.as_ptr()` (pointer to a 16 byte array) | +| `bool` | `u8` | `if v { 1 } else { 0 }` | +| `&str` | `u64` | v.len() 32bit << 32 | v.as_ptr() 32bit | +| `&[u8]` | `u64` | v.len() 32bit << 32 | v.as_ptr() 32bit | +| `Vec` | `u64` | v.len() 32bit << 32 | v.as_ptr() 32bit | +| `Vec where T: Encode` | `u64` | `let e = v.encode();`

e.len() 32bit << 32 | e.as_ptr() 32bit | +| `&[T] where T: Encode` | `u64` | `let e = v.encode();` e.len() 32bit << 32 | e.as_ptr() 32bit | +| `[u8; N]` | `u32` | `v.as_ptr()` | +| `*const T` | `u32` | `Identity` | +| `Option<T>` | `u64` | `let e = v.encode();`

e.len() 32bit << 32 | e.as_ptr() 32bit | +| [`T where T: PassBy`](pass_by::Inner) | Depends on inner | Depends on inner | +| [`T where T: PassBy`](pass_by::Codec) | `u64`| v.len() 32bit << 32 | v.as_ptr() 32bit | + +`Identity` means that the value is converted directly into the corresponding FFI type. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/runtime-interface/proc-macro/Cargo.toml b/primitives/runtime-interface/proc-macro/Cargo.toml index 96a882471806d55e2578c53f486d8fa63c4b40bf..8358d2170575f1831297250145ea0daa81d6a841 100644 --- a/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/primitives/runtime-interface/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-proc-macro" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/runtime-interface/proc-macro/src/lib.rs b/primitives/runtime-interface/proc-macro/src/lib.rs index 2f5b9de1c14e7da9dd07113cfe71facb6d97f776..df43551398a121ec6d667af1076d7929b6e6a319 100644 --- a/primitives/runtime-interface/proc-macro/src/lib.rs +++ b/primitives/runtime-interface/proc-macro/src/lib.rs @@ -26,21 +26,59 @@ //! 3. The [`PassByEnum`](derive.PassByInner.html) derive macro for implementing `PassBy` with `Enum`. //! 4. The [`PassByInner`](derive.PassByInner.html) derive macro for implementing `PassBy` with `Inner`. -use syn::{parse_macro_input, ItemTrait, DeriveInput}; +use syn::{parse_macro_input, ItemTrait, DeriveInput, Result, Token}; +use syn::parse::{Parse, ParseStream}; mod pass_by; mod runtime_interface; mod utils; +struct Options { + wasm_only: bool, + tracing: bool +} + +impl Options { + fn unpack(self) -> (bool, bool) { + (self.wasm_only, self.tracing) + } +} +impl Default for Options { + fn default() -> Self { + Options { wasm_only: false, tracing: true } + } +} + +impl Parse for Options { + fn parse(input: ParseStream) -> Result { + let mut res = Self::default(); + while !input.is_empty() { + let lookahead = input.lookahead1(); + if lookahead.peek(runtime_interface::keywords::wasm_only) { + let _ = input.parse::(); + res.wasm_only = true; + } else if lookahead.peek(runtime_interface::keywords::no_tracing) { + let _ = input.parse::(); + res.tracing = false; + } else if lookahead.peek(Token![,]) { + let _ = input.parse::(); + } else { + return Err(lookahead.error()) + } + } + Ok(res) + } +} + #[proc_macro_attribute] pub fn runtime_interface( attrs: proc_macro::TokenStream, input: proc_macro::TokenStream, ) -> proc_macro::TokenStream { let trait_def = parse_macro_input!(input as ItemTrait); - let wasm_only = parse_macro_input!(attrs as Option); + let (wasm_only, tracing) = parse_macro_input!(attrs as Options).unpack(); - runtime_interface::runtime_interface_impl(trait_def, wasm_only.is_some()) + runtime_interface::runtime_interface_impl(trait_def, wasm_only, tracing) .unwrap_or_else(|e| e.to_compile_error()) .into() } @@ -61,4 +99,4 @@ pub fn pass_by_inner(input: proc_macro::TokenStream) -> proc_macro::TokenStream pub fn pass_by_enum(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = parse_macro_input!(input as DeriveInput); pass_by::enum_derive_impl(input).unwrap_or_else(|e| e.to_compile_error()).into() -} +} \ No newline at end of file diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs index 
6760e9656113a9e8b3cfd0e7246a514ce915a477..2725bd2c89ce574811cb25b0b251cc84f6d3c0a8 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs @@ -46,7 +46,7 @@ use std::iter; /// Generate one bare function per trait method. The name of the bare function is equal to the name /// of the trait method. -pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool) -> Result { +pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool, tracing: bool) -> Result { let trait_name = &trait_def.ident; let runtime_interface = get_runtime_interface(trait_def)?; @@ -63,7 +63,7 @@ pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool) -> Result = runtime_interface.all_versions().try_fold(token_stream?, |mut t, (version, method)| { - t.extend(function_std_impl(trait_name, method, version, is_wasm_only)?); + t.extend(function_std_impl(trait_name, method, version, is_wasm_only, tracing)?); Ok(t) }); @@ -145,6 +145,7 @@ fn function_std_impl( method: &TraitItemMethod, version: u32, is_wasm_only: bool, + tracing: bool, ) -> Result { let function_name = create_function_ident_with_version(&method.sig.ident, version); let function_name_str = function_name.to_string(); @@ -168,13 +169,21 @@ fn function_std_impl( let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); // Don't make the function public accessible when this is a wasm only interface. let call_to_trait = generate_call_to_trait(trait_name, method, version, is_wasm_only); + let call_to_trait = if !tracing { + call_to_trait + } else { + parse_quote!( + #crate_::sp_tracing::within_span! { #crate_::sp_tracing::trace_span!(#function_name_str); + #call_to_trait + } + ) + }; Ok( quote_spanned! { method.span() => #[cfg(feature = "std")] #( #attrs )* fn #function_name( #( #args, )* ) #return_value { - #crate_::sp_tracing::enter_span!(#function_name_str); #call_to_trait } } diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs index 721eed649c25d5d0244615194f185b672c2bd1e3..7a4dbc5773a289e5fa3c3378213d81b757ac800d 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs @@ -227,7 +227,6 @@ fn generate_host_function_implementation( __function_context__: &mut dyn #crate_::sp_wasm_interface::FunctionContext, args: &mut dyn Iterator, ) -> std::result::Result, String> { - #crate_::sp_tracing::enter_span!(#name); #( #wasm_to_ffi_values )* #( #ffi_to_host_values )* #host_function_call diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs index c9b6edf68fd5a48ec9f56200b3b2154c267baebe..02c291975738c7a6762ed4d6ad25a2f8a7a1238f 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs @@ -33,14 +33,20 @@ mod trait_decl_impl; pub mod keywords { // Custom keyword `wasm_only` that can be given as attribute to [`runtime_interface`]. syn::custom_keyword!(wasm_only); + // Disable tracing-macros added to the [`runtime_interface`] by specifying this optional entry + syn::custom_keyword!(no_tracing); } /// Implementation of the `runtime_interface` attribute. 
/// /// It expects the trait definition the attribute was put above and if this should be an wasm only /// interface. -pub fn runtime_interface_impl(trait_def: ItemTrait, is_wasm_only: bool) -> Result { - let bare_functions = bare_function_interface::generate(&trait_def, is_wasm_only)?; +pub fn runtime_interface_impl( + trait_def: ItemTrait, + is_wasm_only: bool, + tracing: bool, +) -> Result { + let bare_functions = bare_function_interface::generate(&trait_def, is_wasm_only, tracing)?; let crate_include = generate_runtime_interface_include(); let mod_name = Ident::new(&trait_def.ident.to_string().to_snake_case(), Span::call_site()); let trait_decl_impl = trait_decl_impl::process(&trait_def, is_wasm_only)?; diff --git a/primitives/runtime-interface/src/impls.rs b/primitives/runtime-interface/src/impls.rs index 259d3517f001dcd415b452c09273baa2d2346589..da57cf086beef3497fac5e8104269e4c5e49d248 100644 --- a/primitives/runtime-interface/src/impls.rs +++ b/primitives/runtime-interface/src/impls.rs @@ -537,3 +537,7 @@ impl PassBy for sp_wasm_interface::ValueType { impl PassBy for sp_wasm_interface::Value { type PassBy = Codec; } + +impl PassBy for sp_storage::TrackedStorageKey { + type PassBy = Codec; +} diff --git a/primitives/runtime-interface/src/lib.rs b/primitives/runtime-interface/src/lib.rs index 562f94b278efcc60fd21b754dfcf4e1e65f88ab3..2273e453f104a8762ac58f7c80ef29a7eeeb710c 100644 --- a/primitives/runtime-interface/src/lib.rs +++ b/primitives/runtime-interface/src/lib.rs @@ -284,6 +284,14 @@ pub use sp_std; /// 1. The generated functions are not callable from the native side. /// 2. The trait as shown above is not implemented for `Externalities` and is instead implemented /// for `FunctionExecutor` (from `sp-wasm-interface`). +/// +/// # Disable tracing +/// By addding `no_tracing` to the list of options you can prevent the wasm-side interface from +/// generating the default `sp-tracing`-calls. Note that this is rarely needed but only meant for +/// the case when that would create a circular dependency. You usually _do not_ want to add this +/// flag, as tracing doesn't cost you anything by default anyways (it is added as a no-op) but is +/// super useful for debugging later. 
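With the `Options` parser added to the proc-macro above, the attribute now takes a comma-separated list of `wasm_only` and `no_tracing` flags, and `no_tracing` suppresses the `sp_tracing::within_span!` wrapper that `bare_function_interface` otherwise generates around every host call. A hypothetical declaration showing both options together (sketch only):

```rust
// Hypothetical interface declaration showing the new attribute options. `wasm_only`
// keeps the interface callable from wasm only; `no_tracing` skips the generated
// `sp_tracing` span around each call (used by the WasmTracing interface itself to
// avoid a circular dependency).
#[sp_runtime_interface::runtime_interface(wasm_only, no_tracing)]
trait HypotheticalInterface {
    fn ping(value: u32) -> u32 {
        value.wrapping_add(1)
    }
}
```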
+/// pub use sp_runtime_interface_proc_macro::runtime_interface; #[doc(hidden)] diff --git a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml index bd4f3f97ec7e316a7bd6214626b136cab2c7c552..7e31fb8ad2b80318ff6e7b28cd7d7edf557111c7 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml +++ b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-test-wasm-deprecated" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -13,10 +13,10 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime-interface = { version = "2.0.0-rc5", default-features = false, path = "../" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../io" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../core" } +sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../" } +sp-std = { version = "2.0.0", default-features = false, path = "../../std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../io" } +sp-core = { version = "2.0.0", default-features = false, path = "../../core" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } diff --git a/primitives/runtime-interface/test-wasm/Cargo.toml b/primitives/runtime-interface/test-wasm/Cargo.toml index a1756883dede2de51db420be3a83b5459e363f4e..99cea1849e1b948a979b4a48480479dbc62d4c6a 100644 --- a/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/primitives/runtime-interface/test-wasm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-test-wasm" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -13,10 +13,10 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime-interface = { version = "2.0.0-rc5", default-features = false, path = "../" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../io" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../core" } +sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../" } +sp-std = { version = "2.0.0", default-features = false, path = "../../std" } +sp-io = { version = "2.0.0", default-features = false, path = "../../io" } +sp-core = { version = "2.0.0", default-features = false, path = "../../core" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } diff --git a/primitives/runtime-interface/test/Cargo.toml b/primitives/runtime-interface/test/Cargo.toml index 48dbeedbdad3a5228fefc808b9dbbf346f1dafed..0fd61f7bcea8af59d279b08a2a78f160b48f52e8 100644 --- a/primitives/runtime-interface/test/Cargo.toml +++ b/primitives/runtime-interface/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-test" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,12 +12,13 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] 
-sp-runtime-interface = { version = "2.0.0-rc5", path = "../" } -sc-executor = { version = "0.8.0-rc5", path = "../../../client/executor" } -sp-runtime-interface-test-wasm = { version = "2.0.0-rc5", path = "../test-wasm" } -sp-runtime-interface-test-wasm-deprecated = { version = "2.0.0-rc5", path = "../test-wasm-deprecated" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-machine" } -sp-runtime = { version = "2.0.0-rc5", path = "../../runtime" } -sp-core = { version = "2.0.0-rc5", path = "../../core" } -sp-io = { version = "2.0.0-rc5", path = "../../io" } -tracing = "0.1.18" +sp-runtime-interface = { version = "2.0.0", path = "../" } +sc-executor = { version = "0.8.0", path = "../../../client/executor" } +sp-runtime-interface-test-wasm = { version = "2.0.0", path = "../test-wasm" } +sp-runtime-interface-test-wasm-deprecated = { version = "2.0.0", path = "../test-wasm-deprecated" } +sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } +sp-runtime = { version = "2.0.0", path = "../../runtime" } +sp-core = { version = "2.0.0", path = "../../core" } +sp-io = { version = "2.0.0", path = "../../io" } +tracing = "0.1.19" +tracing-core = "0.1.15" diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index c213c977829e6f57b4a4cafdab21bca13052b4d9..c66609daa2f29695ed3d05435e5c5638744c8963 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -18,8 +18,6 @@ //! Integration tests for runtime interface primitives #![cfg(test)] -#![cfg(test)] - use sp_runtime_interface::*; use sp_runtime_interface_test_wasm::{wasm_binary_unwrap, test_api::HostFunctions}; @@ -157,14 +155,26 @@ fn test_versionining_with_new_host_works() { #[test] fn test_tracing() { - use tracing::span::Id as SpanId; + use std::fmt; + use tracing::{span::Id as SpanId}; + use tracing_core::field::{Field, Visit}; #[derive(Clone)] struct TracingSubscriber(Arc>); + struct FieldConsumer(&'static str, Option); + impl Visit for FieldConsumer { + + fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { + if field.name() == self.0 { + self.1 = Some(format!("{:?}", value)) + } + } + } + #[derive(Default)] struct Inner { - spans: HashSet<&'static str>, + spans: HashSet, } impl tracing::subscriber::Subscriber for TracingSubscriber { @@ -173,7 +183,9 @@ fn test_tracing() { fn new_span(&self, span: &tracing::span::Attributes) -> tracing::Id { let mut inner = self.0.lock().unwrap(); let id = SpanId::from_u64((inner.spans.len() + 1) as _); - inner.spans.insert(span.metadata().name()); + let mut f = FieldConsumer("name", None); + span.record(&mut f); + inner.spans.insert(f.1.unwrap_or_else(||span.metadata().name().to_owned())); id } @@ -196,5 +208,4 @@ fn test_tracing() { let inner = subscriber.0.lock().unwrap(); assert!(inner.spans.contains("return_input_version_1")); - assert!(inner.spans.contains("ext_test_api_return_input_version_1")); -} +} \ No newline at end of file diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 78c0bbcea8f85c35a04db0f7e94d94199725534f..6579a17c77fec015dbbb99e1ce0ff707c2e5c08b 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = 
"https://github.com/paritytech/substrate/" description = "Runtime Modules shared primitive types." documentation = "https://docs.rs/sp-runtime" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,16 +17,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../application-crypto" } -sp-arithmetic = { version = "2.0.0-rc5", default-features = false, path = "../arithmetic" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../io" } +sp-core = { version = "2.0.0", default-features = false, path = "../core" } +sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } +sp-arithmetic = { version = "2.0.0", default-features = false, path = "../arithmetic" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-io = { version = "2.0.0", default-features = false, path = "../io" } log = { version = "0.4.8", optional = true } paste = "0.1.6" rand = { version = "0.7.2", optional = true } impl-trait-for-tuples = "0.1.3" -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../inherents" } +sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } hash256-std-hasher = { version = "0.15.2", default-features = false } either = { version = "1.5", default-features = false } @@ -33,7 +34,7 @@ either = { version = "1.5", default-features = false } [dev-dependencies] serde_json = "1.0.41" rand = "0.7.2" -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } [features] bench = [] diff --git a/primitives/runtime/README.md b/primitives/runtime/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1515cd8e2961b5a72fee1d39297e51305d16b0a6 --- /dev/null +++ b/primitives/runtime/README.md @@ -0,0 +1,3 @@ +Runtime Modules shared primitive types. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 5d65c13c664b7643eaa4d69b153b0e587fda7d4f..ee381d82bef817b734919aefac9a26a8c4c45646 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -159,7 +159,7 @@ impl BuildStorage for () { fn assimilate_storage( &self, _: &mut sp_core::storage::Storage, - )-> Result<(), String> { + ) -> Result<(), String> { Err("`assimilate_storage` not implemented for `()`".into()) } } @@ -795,7 +795,10 @@ impl SignatureBatching { impl Drop for SignatureBatching { fn drop(&mut self) { // Sanity check. If user forgets to actually call `verify()`. - if !self.0 { + // + // We should not panic if the current thread is already panicking, + // because Rust otherwise aborts the process. 
+ if !self.0 && !sp_std::thread::panicking() { panic!("Signature verification has not been called before `SignatureBatching::drop`") } } @@ -885,4 +888,18 @@ mod tests { ); }); } + + #[test] + #[should_panic(expected = "Hey, I'm an error")] + fn batching_does_not_panic_while_thread_is_already_panicking() { + let mut ext = sp_state_machine::BasicExternalities::default(); + ext.register_extension( + sp_core::traits::TaskExecutorExt::new(sp_core::testing::TaskExecutor::new()), + ); + + ext.execute_with(|| { + let _batching = SignatureBatching::start(); + panic!("Hey, I'm an error"); + }); + } } diff --git a/primitives/runtime/src/transaction_validity.rs b/primitives/runtime/src/transaction_validity.rs index 1aad9e75aec3477685ca28b721399170b138d7b6..e9e2f2b3d3c2be62d5fe1c2e3c882f1d410a97f6 100644 --- a/primitives/runtime/src/transaction_validity.rs +++ b/primitives/runtime/src/transaction_validity.rs @@ -98,7 +98,7 @@ impl From for &'static str { InvalidTransaction::BadProof => "Transaction has a bad signature", InvalidTransaction::AncientBirthBlock => "Transaction has an ancient birth block", InvalidTransaction::ExhaustsResources => - "Transaction would exhausts the block limits", + "Transaction would exhaust the block limits", InvalidTransaction::Payment => "Inability to pay some fees (e.g. account balance too low)", InvalidTransaction::BadMandatory => diff --git a/primitives/sandbox/Cargo.toml b/primitives/sandbox/Cargo.toml index ca5cfa4fdc3ccef50681301d2f6b799c0b88382b..70ae56fb48108f6c93922a2b0f50ab5559263733 100755 --- a/primitives/sandbox/Cargo.toml +++ b/primitives/sandbox/Cargo.toml @@ -1,26 +1,27 @@ [package] name = "sp-sandbox" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "This crate provides means to instantiate and execute wasm modules." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] wasmi = { version = "0.6.2", optional = true } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../io" } -sp-wasm-interface = { version = "2.0.0-rc5", default-features = false, path = "../wasm-interface" } +sp-core = { version = "2.0.0", default-features = false, path = "../core" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-io = { version = "2.0.0", default-features = false, path = "../io" } +sp-wasm-interface = { version = "2.0.0", default-features = false, path = "../wasm-interface" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } [dev-dependencies] -wabt = "0.9.2" +wat = "1.0" assert_matches = "1.3.0" [features] diff --git a/primitives/sandbox/README.md b/primitives/sandbox/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9335b53ae1fb98ce0b0fd08f10e06c7c40a84362 --- /dev/null +++ b/primitives/sandbox/README.md @@ -0,0 +1,21 @@ +This crate provides means to instantiate and execute wasm modules. + +It works even when the user of this library executes from +inside the wasm VM. In this case the same VM is used for execution +of both the sandbox owner and the sandboxed module, without compromising security +and without the performance penalty of full wasm emulation inside wasm. 
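The `SignatureBatching::drop` change a few hunks above guards its sanity-check panic with `sp_std::thread::panicking()`: panicking inside `Drop` while the thread is already unwinding turns into a process abort, which is exactly what the new `batching_does_not_panic_while_thread_is_already_panicking` test would otherwise trigger. The same guard pattern in plain std Rust (illustrative, not the sp-runtime type):

```rust
// A guard that insists `finish()` is called, but stays quiet if the thread is
// already panicking -- a second panic during unwinding would abort the process.
struct MustFinish {
    finished: bool,
}

impl MustFinish {
    fn new() -> Self {
        MustFinish { finished: false }
    }

    fn finish(mut self) {
        self.finished = true;
    }
}

impl Drop for MustFinish {
    fn drop(&mut self) {
        if !self.finished && !std::thread::panicking() {
            panic!("`finish()` was never called");
        }
    }
}

fn main() {
    let result = std::panic::catch_unwind(|| {
        let _guard = MustFinish::new();
        panic!("original error"); // the guard's Drop must not panic again here
    });
    assert!(result.is_err()); // we caught the original panic instead of aborting
}
```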
+ +This is achieved by using bindings to the wasm VM, which are published by the host API. +This API is thin and consists of only a handful functions. It contains functions for instantiating +modules and executing them, but doesn't contain functions for inspecting the module +structure. The user of this library is supposed to read the wasm module. + +When this crate is used in the `std` environment all these functions are implemented by directly +calling the wasm VM. + +Examples of possible use-cases for this library are not limited to the following: + +- implementing smart-contract runtimes that use wasm for contract code +- executing a wasm substrate runtime inside of a wasm parachain + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/sandbox/with_std.rs b/primitives/sandbox/with_std.rs index b5d6d89d0434f24cda01339188c4598d03dabb82..0f46f49503cac39cb540bdb1678e2bece24d0059 100755 --- a/primitives/sandbox/with_std.rs +++ b/primitives/sandbox/with_std.rs @@ -300,7 +300,6 @@ impl Instance { #[cfg(test)] mod tests { - use wabt; use crate::{Error, Value, ReturnValue, HostError, EnvironmentDefinitionBuilder, Instance}; use assert_matches::assert_matches; @@ -351,7 +350,7 @@ mod tests { #[test] fn invoke_args() { - let code = wabt::wat2wasm(r#" + let code = wat::parse_str(r#" (module (import "env" "assert" (func $assert (param i32))) @@ -386,7 +385,7 @@ mod tests { #[test] fn return_value() { - let code = wabt::wat2wasm(r#" + let code = wat::parse_str(r#" (module (func (export "call") (param $x i32) (result i32) (i32.add @@ -408,7 +407,7 @@ mod tests { #[test] fn signatures_dont_matter() { - let code = wabt::wat2wasm(r#" + let code = wat::parse_str(r#" (module (import "env" "polymorphic_id" (func $id_i32 (param i32) (result i32))) (import "env" "polymorphic_id" (func $id_i64 (param i64) (result i64))) @@ -450,7 +449,7 @@ mod tests { let mut env_builder = EnvironmentDefinitionBuilder::new(); env_builder.add_host_func("env", "returns_i32", env_returns_i32); - let code = wabt::wat2wasm(r#" + let code = wat::parse_str(r#" (module ;; It's actually returns i32, but imported as if it returned i64 (import "env" "returns_i32" (func $returns_i32 (result i64))) diff --git a/primitives/serializer/Cargo.toml b/primitives/serializer/Cargo.toml index 05b8cc74f2c8134ba92e7de667a53db581eff0a6..5a4514db86da92d183607498964e410404e30ecc 100644 --- a/primitives/serializer/Cargo.toml +++ b/primitives/serializer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-serializer" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate customizable serde serializer." documentation = "https://docs.rs/sp-serializer" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/serializer/README.md b/primitives/serializer/README.md new file mode 100644 index 0000000000000000000000000000000000000000..083a0857cc0d29a968ad312124cbd8a5cb8e11fa --- /dev/null +++ b/primitives/serializer/README.md @@ -0,0 +1,6 @@ +Substrate customizable serde serializer. + +The idea is that we can later change the implementation +to something more compact, but for now we're using JSON. 
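The sandbox tests above replace `wabt::wat2wasm` with `wat::parse_str`, which parses textual WebAssembly and returns the binary module as a `Vec<u8>`. A minimal sketch of the call, assuming the `wat = "1.0"` dependency added in this diff:

```rust
// `wat::parse_str` turns textual WebAssembly into its binary encoding.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let code: Vec<u8> = wat::parse_str(r#"
        (module
            (func (export "call") (param $x i32) (result i32)
                (i32.add (local.get $x) (i32.const 1))
            )
        )
    "#)?;
    // The buffer starts with the wasm magic number `\0asm`.
    assert_eq!(&code[0..4], b"\0asm");
    Ok(())
}
```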
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/session/Cargo.toml b/primitives/session/Cargo.toml index 320e6adc1c7b57504d6ccd9021461a4c3794e5a6..4fccce6283142d83bd40a76ea9b172187a852195 100644 --- a/primitives/session/Cargo.toml +++ b/primitives/session/Cargo.toml @@ -1,23 +1,24 @@ [package] name = "sp-session" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Primitives for sessions" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../api" } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-staking = { version = "2.0.0-rc5", default-features = false, path = "../staking" } -sp-runtime = { version = "2.0.0-rc5", optional = true, path = "../runtime" } +sp-api = { version = "2.0.0", default-features = false, path = "../api" } +sp-core = { version = "2.0.0", default-features = false, path = "../core" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-staking = { version = "2.0.0", default-features = false, path = "../staking" } +sp-runtime = { version = "2.0.0", optional = true, path = "../runtime" } [features] default = [ "std" ] diff --git a/primitives/session/README.md b/primitives/session/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2d1f9d9bc1d5b3cb77125be91d847b97c276833c --- /dev/null +++ b/primitives/session/README.md @@ -0,0 +1,3 @@ +Substrate core types around sessions. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/staking/Cargo.toml b/primitives/staking/Cargo.toml index bb6ef06f34d61b0c1c088869e39823b8e76e2a5b..315d5acc49dae598b8341964758fa729db70012f 100644 --- a/primitives/staking/Cargo.toml +++ b/primitives/staking/Cargo.toml @@ -1,20 +1,21 @@ [package] name = "sp-staking" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "A crate which contains primitives that are useful for implementation that uses staking approaches in general. Definitions related to sessions, slashing, etc go here." 
+readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } [features] default = ["std"] diff --git a/primitives/staking/README.md b/primitives/staking/README.md new file mode 100644 index 0000000000000000000000000000000000000000..892e1379d9a53d9677bcc13f5f0ba42b61ae6a0b --- /dev/null +++ b/primitives/staking/README.md @@ -0,0 +1,4 @@ +A crate which contains primitives that are useful for implementation that uses staking +approaches in general. Definitions related to sessions, slashing, etc go here. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 781d3b068a857deb81389c79bc54a34d10496f7d..8940488319931577f652db7ff9fad6f2a387baa1 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-state-machine" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "Substrate State Machine" edition = "2018" @@ -8,30 +8,46 @@ license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sp-state-machine" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -log = "0.4.8" -parking_lot = "0.10.0" -hash-db = "0.15.2" -trie-db = "0.22.0" -trie-root = "0.16.0" -sp-trie = { version = "2.0.0-rc5", path = "../trie" } -sp-core = { version = "2.0.0-rc5", path = "../core" } -sp-panic-handler = { version = "2.0.0-rc5", path = "../panic-handler" } -codec = { package = "parity-scale-codec", version = "1.3.1" } -num-traits = "0.2.8" -rand = "0.7.2" -sp-externalities = { version = "0.8.0-rc5", path = "../externalities" } -itertools = "0.9" +log = { version = "0.4.8", optional = true } +parking_lot = { version = "0.10.0", optional = true } +hash-db = { version = "0.15.2", default-features = false } +trie-db = { version = "0.22.0", default-features = false } +trie-root = { version = "0.16.0", default-features = false } +sp-trie = { version = "2.0.0", path = "../trie", default-features = false } +sp-core = { version = "2.0.0", path = "../core", default-features = false } +sp-panic-handler = { version = "2.0.0", path = "../panic-handler", optional = true } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +num-traits = { version = "0.2.8", default-features = false } +rand = { version = "0.7.2", optional = true } +sp-externalities = { version = "0.8.0", path = "../externalities", default-features = false } smallvec = "1.4.1" +sp-std = { version = "2.0.0", default-features = false, path = "../std" } [dev-dependencies] -hex-literal = "0.2.1" -sp-runtime = { version = "2.0.0-rc5", path = "../runtime" } +hex-literal = "0.3.1" +sp-runtime = { version = "2.0.0", path = "../runtime" } pretty_assertions = "0.6.1" [features] -default = [] +default = ["std"] +std = [ + "codec/std", + "hash-db/std", + "num-traits/std", + "sp-core/std", + "sp-externalities/std", + "sp-std/std", + "sp-trie/std", + 
"trie-db/std", + "trie-root/std", + "log", + "parking_lot", + "rand", + "sp-panic-handler", +] diff --git a/primitives/state-machine/README.md b/primitives/state-machine/README.md new file mode 100644 index 0000000000000000000000000000000000000000..aa244da62d50fc4ee549f1431d8f1dada8b48051 --- /dev/null +++ b/primitives/state-machine/README.md @@ -0,0 +1,3 @@ +Substrate state machine implementation. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 9ec03c4d1e249f181f0ffa22b9d55cae8ffa0c0c..360fe9a985682f457d0065ebecfee5846d3842f5 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -19,18 +19,23 @@ use hash_db::Hasher; use codec::{Decode, Encode}; -use sp_core::{traits::RuntimeCode, storage::{ChildInfo, well_known_keys}}; +use sp_core::{ + storage::{ChildInfo, well_known_keys, TrackedStorageKey} +}; use crate::{ trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, - UsageInfo, StorageKey, StorageValue, StorageCollection, + UsageInfo, StorageKey, StorageValue, StorageCollection, ChildStorageCollection, }; +use sp_std::vec::Vec; +#[cfg(feature = "std")] +use sp_core::traits::RuntimeCode; /// A state backend is used to read state data and can have changes committed /// to it. /// /// The clone operation (if implemented) should be cheap. -pub trait Backend: std::fmt::Debug { +pub trait Backend: sp_std::fmt::Debug { /// An error type when fetching data is not possible. type Error: super::Error; @@ -212,7 +217,13 @@ pub trait Backend: std::fmt::Debug { } /// Commit given transaction to storage. - fn commit(&self, _: H::Out, _: Self::Transaction, _: StorageCollection) -> Result<(), Self::Error> { + fn commit( + &self, + _: H::Out, + _: Self::Transaction, + _: StorageCollection, + _: ChildStorageCollection, + ) -> Result<(), Self::Error> { unimplemented!() } @@ -226,10 +237,13 @@ pub trait Backend: std::fmt::Debug { unimplemented!() } - /// Update the whitelist for tracking db reads/writes - fn set_whitelist(&self, _: Vec>) { - unimplemented!() + /// Get the whitelist for tracking db reads/writes + fn get_whitelist(&self) -> Vec { + Default::default() } + + /// Update the whitelist for tracking db reads/writes + fn set_whitelist(&self, _: Vec) {} } impl<'a, T: Backend, H: Hasher> Backend for &'a T { @@ -363,11 +377,13 @@ pub(crate) fn insert_into_memory_db(mdb: &mut sp_trie::MemoryDB, input: } /// Wrapper to create a [`RuntimeCode`] from a type that implements [`Backend`]. +#[cfg(feature = "std")] pub struct BackendRuntimeCode<'a, B, H> { backend: &'a B, _marker: std::marker::PhantomData, } +#[cfg(feature = "std")] impl<'a, B: Backend, H: Hasher> sp_core::traits::FetchRuntimeCode for BackendRuntimeCode<'a, B, H> { @@ -376,6 +392,7 @@ impl<'a, B: Backend, H: Hasher> sp_core::traits::FetchRuntimeCode for } } +#[cfg(feature = "std")] impl<'a, B: Backend, H: Hasher> BackendRuntimeCode<'a, B, H> where H::Out: Encode { /// Create a new instance. 
pub fn new(backend: &'a B) -> Self { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 3ddf79dbd9127ed922dacc8efbb7794d9590c9ba..3db7a54750a02f2e866d6a22c247d1440a92638c 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -27,7 +27,7 @@ use sp_trie::trie_types::Layout; use sp_core::{ storage::{ well_known_keys::is_child_storage_key, Storage, - ChildInfo, StorageChild, + ChildInfo, StorageChild, TrackedStorageKey, }, traits::Externalities, Blake2Hasher, }; @@ -325,7 +325,11 @@ impl Externalities for BasicExternalities { unimplemented!("reset_read_write_count is not supported in Basic") } - fn set_whitelist(&mut self, _: Vec>) { + fn get_whitelist(&self) -> Vec { + unimplemented!("get_whitelist is not supported in Basic") + } + + fn set_whitelist(&mut self, _: Vec) { unimplemented!("set_whitelist is not supported in Basic") } } diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 675904578be970137003c58372d887636cb6f861..b23481411ae2767616d2da0d5444fbed80c48024 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -140,8 +140,15 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( Number: BlockNumber, { changes - .filter(|( _, v)| v.extrinsics().next().is_some()) - .try_fold(BTreeMap::new(), |mut map: BTreeMap<&[u8], (ExtrinsicIndex, Vec)>, (k, v)| { + .filter_map(|(k, v)| { + let extrinsics = v.extrinsics(); + if !extrinsics.is_empty() { + Some((k, extrinsics)) + } else { + None + } + }) + .try_fold(BTreeMap::new(), |mut map: BTreeMap<&[u8], (ExtrinsicIndex, Vec)>, (k, extrinsics)| { match map.entry(k) { Entry::Vacant(entry) => { // ignore temporary values (values that have null value at the end of operation @@ -161,7 +168,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( } }; - let extrinsics = v.extrinsics().cloned().collect(); + let extrinsics = extrinsics.into_iter().collect(); entry.insert((ExtrinsicIndex { block: block.clone(), key: k.to_vec(), @@ -170,11 +177,11 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( Entry::Occupied(mut entry) => { // we do not need to check for temporary values here, because entry is Occupied // AND we are checking it before insertion - let extrinsics = &mut entry.get_mut().1; - extrinsics.extend( - v.extrinsics().cloned() + let entry_extrinsics = &mut entry.get_mut().1; + entry_extrinsics.extend( + extrinsics.into_iter() ); - extrinsics.sort(); + entry_extrinsics.sort(); }, } diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 04322f1d5930caa1c9c0b71f86e6edd8b69309b7..fd7b38c052f9e5122458fefb5c2661803590aa2e 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -85,9 +85,6 @@ use crate::{ }, }; -/// Changes that are made outside of extrinsics are marked with this index; -pub const NO_EXTRINSIC_INDEX: u32 = 0xffffffff; - /// Requirements for block number that can be used with changes tries. 
pub trait BlockNumber: Send + Sync + 'static + diff --git a/primitives/state-machine/src/error.rs b/primitives/state-machine/src/error.rs index 5468262f54a2ce275822987ba8d60f356bfd8ab3..489f6e6666001c630a2ec5f077fc4a5859c3cbf3 100644 --- a/primitives/state-machine/src/error.rs +++ b/primitives/state-machine/src/error.rs @@ -17,7 +17,7 @@ /// State Machine Errors -use std::fmt; +use sp_std::fmt; /// State Machine Error bound. /// @@ -34,7 +34,7 @@ impl Error for T {} #[derive(Debug, Eq, PartialEq)] pub enum ExecutionError { /// Backend error. - Backend(String), + Backend(crate::DefaultError), /// The entry `:code` doesn't exist in storage so there's no way we can execute anything. CodeEntryDoesNotExist, /// Backend is incompatible with execution proof generation process. diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index d7d4bc145eb068400d27c37dddfd9272da221f2f..e9259f9a10bc1a76dcefb944b811e4c1d6a7e1bc 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -18,23 +18,28 @@ //! Concrete externalities implementation. use crate::{ - StorageKey, StorageValue, OverlayedChanges, StorageTransactionCache, + StorageKey, StorageValue, OverlayedChanges, backend::Backend, - changes_trie::State as ChangesTrieState, }; - use hash_db::Hasher; use sp_core::{ - offchain::storage::OffchainOverlayedChanges, - storage::{well_known_keys::is_child_storage_key, ChildInfo}, - traits::Externalities, hexdisplay::HexDisplay, + storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}, + hexdisplay::HexDisplay, }; use sp_trie::{trie_types::Layout, empty_child_trie_root}; -use sp_externalities::{Extensions, Extension}; +use sp_externalities::{Externalities, Extensions, Extension, + ExtensionStore}; use codec::{Decode, Encode, EncodeAppend}; -use std::{error, fmt, any::{Any, TypeId}}; -use log::{warn, trace}; +use sp_std::{fmt, any::{Any, TypeId}, vec::Vec, vec, boxed::Box}; +use crate::{warn, trace, log_error}; +#[cfg(feature = "std")] +use sp_core::offchain::storage::OffchainOverlayedChanges; +#[cfg(feature = "std")] +use crate::changes_trie::State as ChangesTrieState; +use crate::StorageTransactionCache; +#[cfg(feature = "std")] +use std::error; const EXT_NOT_ALLOWED_TO_FAIL: &str = "Externalities not allowed to fail within runtime"; const BENCHMARKING_FN: &str = "\ @@ -42,7 +47,19 @@ const BENCHMARKING_FN: &str = "\ For that reason client started transactions before calling into runtime are not allowed. Without client transactions the loop condition garantuees the success of the tx close."; + +#[cfg(feature = "std")] +fn guard() -> sp_panic_handler::AbortGuard { + sp_panic_handler::AbortGuard::force_abort() +} + +#[cfg(not(feature = "std"))] +fn guard() -> () { + () +} + /// Errors that can occur when interacting with the externalities. +#[cfg(feature = "std")] #[derive(Debug, Copy, Clone)] pub enum Error { /// Failure to load state data from the backend. @@ -53,6 +70,7 @@ pub enum Error { Executor(E), } +#[cfg(feature = "std")] impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { @@ -62,6 +80,7 @@ impl fmt::Display for Error { } } +#[cfg(feature = "std")] impl error::Error for Error { fn description(&self) -> &str { match *self { @@ -81,30 +100,49 @@ pub struct Ext<'a, H, N, B> /// The overlayed changes to write to. overlay: &'a mut OverlayedChanges, /// The overlayed changes destined for the Offchain DB. 
+ #[cfg(feature = "std")] offchain_overlay: &'a mut OffchainOverlayedChanges, /// The storage backend to read from. backend: &'a B, /// The cache for the storage transactions. storage_transaction_cache: &'a mut StorageTransactionCache, /// Changes trie state to read from. + #[cfg(feature = "std")] changes_trie_state: Option>, /// Pseudo-unique id used for tracing. pub id: u16, /// Dummy usage of N arg. - _phantom: std::marker::PhantomData, + _phantom: sp_std::marker::PhantomData, /// Extensions registered with this instance. + #[cfg(feature = "std")] extensions: Option<&'a mut Extensions>, } + impl<'a, H, N, B> Ext<'a, H, N, B> -where - H: Hasher, - H::Out: Ord + 'static + codec::Codec, - B: 'a + Backend, - N: crate::changes_trie::BlockNumber, + where + H: Hasher, + B: Backend, + N: crate::changes_trie::BlockNumber, { + /// Create a new `Ext`. + #[cfg(not(feature = "std"))] + pub fn new( + overlay: &'a mut OverlayedChanges, + storage_transaction_cache: &'a mut StorageTransactionCache, + backend: &'a B, + ) -> Self { + Ext { + overlay, + backend, + id: 0, + storage_transaction_cache, + _phantom: Default::default(), + } + } /// Create a new `Ext` from overlayed changes and read-only backend + #[cfg(feature = "std")] pub fn new( overlay: &'a mut OverlayedChanges, offchain_overlay: &'a mut OffchainOverlayedChanges, @@ -133,6 +171,7 @@ where } /// Read only accessor for the scheduled overlay changes. + #[cfg(feature = "std")] pub fn get_offchain_storage_changes(&self) -> &OffchainOverlayedChanges { &*self.offchain_overlay } @@ -159,14 +198,14 @@ where } } -impl<'a, H, B, N> Externalities for Ext<'a, H, N, B> +impl<'a, H, N, B> Externalities for Ext<'a, H, N, B> where H: Hasher, H::Out: Ord + 'static + codec::Codec, - B: 'a + Backend, + B: Backend, N: crate::changes_trie::BlockNumber, { - + #[cfg(feature = "std")] fn set_offchain_storage(&mut self, key: &[u8], value: Option<&[u8]>) { use ::sp_core::offchain::STORAGE_PREFIX; match value { @@ -175,8 +214,11 @@ where } } + #[cfg(not(feature = "std"))] + fn set_offchain_storage(&mut self, _key: &[u8], _value: Option<&[u8]>) {} + fn storage(&self, key: &[u8]) -> Option { - let _guard = sp_panic_handler::AbortGuard::force_abort(); + let _guard = guard(); let result = self.overlay.storage(key).map(|x| x.map(|x| x.to_vec())).unwrap_or_else(|| self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL)); trace!(target: "state", "{:04x}: Get {}={:?}", @@ -188,7 +230,7 @@ where } fn storage_hash(&self, key: &[u8]) -> Option> { - let _guard = sp_panic_handler::AbortGuard::force_abort(); + let _guard = guard(); let result = self.overlay .storage(key) .map(|x| x.map(|x| H::hash(x))) @@ -207,7 +249,7 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option { - let _guard = sp_panic_handler::AbortGuard::force_abort(); + let _guard = guard(); let result = self.overlay .child_storage(child_info, key) .map(|x| x.map(|x| x.to_vec())) @@ -231,7 +273,7 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option> { - let _guard = sp_panic_handler::AbortGuard::force_abort(); + let _guard = guard(); let result = self.overlay .child_storage(child_info, key) .map(|x| x.map(|x| H::hash(x))) @@ -251,7 +293,7 @@ where } fn exists_storage(&self, key: &[u8]) -> bool { - let _guard = sp_panic_handler::AbortGuard::force_abort(); + let _guard = guard(); let result = match self.overlay.storage(key) { Some(x) => x.is_some(), _ => self.backend.exists_storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL), @@ -271,7 +313,7 @@ where child_info: &ChildInfo, key: &[u8], ) -> bool { - let _guard = 
sp_panic_handler::AbortGuard::force_abort(); + let _guard = guard(); let result = match self.overlay.child_storage(child_info, key) { Some(x) => x.is_some(), @@ -337,7 +379,7 @@ where HexDisplay::from(&key), value.as_ref().map(HexDisplay::from) ); - let _guard = sp_panic_handler::AbortGuard::force_abort(); + let _guard = guard(); if is_child_storage_key(&key) { warn!(target: "trie", "Refuse to directly set child storage key"); return; @@ -359,7 +401,7 @@ where HexDisplay::from(&key), value.as_ref().map(HexDisplay::from) ); - let _guard = sp_panic_handler::AbortGuard::force_abort(); + let _guard = guard(); self.mark_dirty(); self.overlay.set_child_storage(child_info, key, value); @@ -373,7 +415,7 @@ where self.id, HexDisplay::from(&child_info.storage_key()), ); - let _guard = sp_panic_handler::AbortGuard::force_abort(); + let _guard = guard(); self.mark_dirty(); self.overlay.clear_child_storage(child_info); @@ -387,7 +429,7 @@ where self.id, HexDisplay::from(&prefix), ); - let _guard = sp_panic_handler::AbortGuard::force_abort(); + let _guard = guard(); if is_child_storage_key(prefix) { warn!(target: "trie", "Refuse to directly clear prefix that is part of child storage key"); return; @@ -410,7 +452,7 @@ where HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&prefix), ); - let _guard = sp_panic_handler::AbortGuard::force_abort(); + let _guard = guard(); self.mark_dirty(); self.overlay.clear_child_prefix(child_info, prefix); @@ -430,7 +472,7 @@ where HexDisplay::from(&value), ); - let _guard = sp_panic_handler::AbortGuard::force_abort(); + let _guard = guard(); self.mark_dirty(); let backend = &mut self.backend; @@ -446,7 +488,7 @@ where } fn storage_root(&mut self) -> Vec { - let _guard = sp_panic_handler::AbortGuard::force_abort(); + let _guard = guard(); if let Some(ref root) = self.storage_transaction_cache.transaction_storage_root { trace!(target: "state", "{:04x}: Root(cached) {}", self.id, @@ -464,7 +506,7 @@ where &mut self, child_info: &ChildInfo, ) -> Vec { - let _guard = sp_panic_handler::AbortGuard::force_abort(); + let _guard = guard(); let storage_key = child_info.storage_key(); let prefixed_storage_key = child_info.prefixed_storage_key(); if self.storage_transaction_cache.transaction_storage_root.is_some() { @@ -525,8 +567,14 @@ where } } + #[cfg(not(feature = "std"))] + fn storage_changes_root(&mut self, _parent_hash: &[u8]) -> Result>, ()> { + Ok(None) + } + + #[cfg(feature = "std")] fn storage_changes_root(&mut self, parent_hash: &[u8]) -> Result>, ()> { - let _guard = sp_panic_handler::AbortGuard::force_abort(); + let _guard = guard(); let root = self.overlay.changes_trie_root( self.backend, self.changes_trie_state.as_ref(), @@ -569,6 +617,7 @@ where } self.overlay.drain_storage_changes( &self.backend, + #[cfg(feature = "std")] None, Default::default(), self.storage_transaction_cache, @@ -586,6 +635,7 @@ where } let changes = self.overlay.drain_storage_changes( &self.backend, + #[cfg(feature = "std")] None, Default::default(), self.storage_transaction_cache, @@ -594,6 +644,7 @@ where changes.transaction_storage_root, changes.transaction, changes.main_storage_changes, + changes.child_storage_changes, ).expect(EXT_NOT_ALLOWED_TO_FAIL); self.mark_dirty(); self.overlay @@ -609,12 +660,15 @@ where self.backend.reset_read_write_count() } - fn set_whitelist(&mut self, new: Vec>) { + fn get_whitelist(&self) -> Vec { + self.backend.get_whitelist() + } + + fn set_whitelist(&mut self, new: Vec) { self.backend.set_whitelist(new) } } - /// Implement `Encode` by forwarding 
the stored raw vec. struct EncodeOpaqueValue(Vec); @@ -639,12 +693,12 @@ impl<'a> StorageAppend<'a> { pub fn append(&mut self, value: Vec) { let value = vec![EncodeOpaqueValue(value)]; - let item = std::mem::take(self.0); + let item = sp_std::mem::take(self.0); *self.0 = match Vec::::append_or_new(item, &value) { Ok(item) => item, Err(_) => { - log::error!( + log_error!( target: "runtime", "Failed to append value, resetting storage item to `[value]`.", ); @@ -654,7 +708,36 @@ impl<'a> StorageAppend<'a> { } } -impl<'a, H, B, N> sp_externalities::ExtensionStore for Ext<'a, H, N, B> +#[cfg(not(feature = "std"))] +impl<'a, H, N, B> ExtensionStore for Ext<'a, H, N, B> +where + H: Hasher, + H::Out: Ord + 'static + codec::Codec, + B: Backend, + N: crate::changes_trie::BlockNumber, +{ + fn extension_by_type_id(&mut self, _type_id: TypeId) -> Option<&mut dyn Any> { + None + } + + fn register_extension_with_type_id( + &mut self, + _type_id: TypeId, + _extension: Box, + ) -> Result<(), sp_externalities::Error> { + Err(sp_externalities::Error::ExtensionsAreNotSupported) + } + + fn deregister_extension_by_type_id( + &mut self, + _type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { + Err(sp_externalities::Error::ExtensionsAreNotSupported) + } +} + +#[cfg(feature = "std")] +impl<'a, H, N, B> ExtensionStore for Ext<'a, H, N, B> where H: Hasher, B: 'a + Backend, diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index ee0980f59b926a95532f3bcf659c66117a1dee83..5b86640aa7d0e06aff4c7288ef7d886b352af6fd 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -18,747 +18,851 @@ //! Substrate state machine implementation. #![warn(missing_docs)] - -use std::{fmt, result, collections::HashMap, panic::UnwindSafe}; -use log::{warn, trace}; -use hash_db::Hasher; -use codec::{Decode, Encode, Codec}; -use sp_core::{ - offchain::storage::OffchainOverlayedChanges, - storage::ChildInfo, NativeOrEncoded, NeverNativeValue, hexdisplay::HexDisplay, - traits::{CodeExecutor, CallInWasmExt, RuntimeCode, SpawnNamed}, -}; -use sp_externalities::Extensions; +#![cfg_attr(not(feature = "std"), no_std)] pub mod backend; +#[cfg(feature = "std")] mod in_memory_backend; +#[cfg(feature = "std")] mod changes_trie; mod error; mod ext; +#[cfg(feature = "std")] mod testing; +#[cfg(feature = "std")] mod basic; mod overlayed_changes; +#[cfg(feature = "std")] mod proving_backend; mod trie_backend; mod trie_backend_essence; mod stats; +#[cfg(feature = "std")] mod read_only; -pub use sp_trie::{trie_types::{Layout, TrieDBMut}, StorageProof, TrieMut, DBValue, MemoryDB}; -pub use testing::TestExternalities; -pub use basic::BasicExternalities; -pub use read_only::{ReadOnlyExternalities, InspectState}; -pub use ext::Ext; -pub use backend::Backend; -pub use changes_trie::{ - AnchorBlockId as ChangesTrieAnchorBlockId, - State as ChangesTrieState, - Storage as ChangesTrieStorage, - RootsStorage as ChangesTrieRootsStorage, - InMemoryStorage as InMemoryChangesTrieStorage, - BuildCache as ChangesTrieBuildCache, - CacheAction as ChangesTrieCacheAction, - ConfigurationRange as ChangesTrieConfigurationRange, - key_changes, key_changes_proof, - key_changes_proof_check, key_changes_proof_check_with_db, - prune as prune_changes_tries, - disabled_state as disabled_changes_trie_state, - BlockNumber as ChangesTrieBlockNumber, -}; -pub use overlayed_changes::{ - OverlayedChanges, StorageChanges, StorageTransactionCache, StorageKey, StorageValue, - StorageCollection, 
ChildStorageCollection, -}; -pub use proving_backend::{ - create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, -}; -pub use trie_backend_essence::{TrieBackendStorage, Storage}; -pub use trie_backend::TrieBackend; -pub use error::{Error, ExecutionError}; -pub use in_memory_backend::new_in_mem; -pub use stats::{UsageInfo, UsageUnit, StateMachineStats}; - -const PROOF_CLOSE_TRANSACTION: &str = "\ - Closing a transaction that was started in this function. Client initiated transactions - are protected from being closed by the runtime. qed"; - -type CallResult = Result, E>; - -/// Default handler of the execution manager. -pub type DefaultHandler = fn(CallResult, CallResult) -> CallResult; - -/// Type of changes trie transaction. -pub type ChangesTrieTransaction = ( - MemoryDB, - ChangesTrieCacheAction<::Out, N>, -); - -/// Trie backend with in-memory storage. -pub type InMemoryBackend = TrieBackend, H>; - -/// Strategy for executing a call into the runtime. -#[derive(Copy, Clone, Eq, PartialEq, Debug)] -pub enum ExecutionStrategy { - /// Execute with the native equivalent if it is compatible with the given wasm module; otherwise fall back to the wasm. - NativeWhenPossible, - /// Use the given wasm module. - AlwaysWasm, - /// Run with both the wasm and the native variant (if compatible). Report any discrepancy as an error. - Both, - /// First native, then if that fails or is not possible, wasm. - NativeElseWasm, +#[cfg(feature = "std")] +pub use std_reexport::*; + +#[cfg(feature = "std")] +pub use execution::*; +#[cfg(feature = "std")] +pub use log::{debug, warn, trace, error as log_error}; + +/// In no_std we skip logs for state_machine, this macro +/// is a noops. +#[cfg(not(feature = "std"))] +#[macro_export] +macro_rules! warn { + (target: $target:expr, $($arg:tt)+) => ( + () + ); + ($($arg:tt)+) => ( + () + ); } -/// Storage backend trust level. -#[derive(Debug, Clone)] -pub enum BackendTrustLevel { - /// Panics from trusted backends are considered justified, and never caught. - Trusted, - /// Panics from untrusted backend are caught and interpreted as runtime error. - /// Untrusted backend may be missing some parts of the trie, so panics are not considered - /// fatal. - Untrusted, +/// In no_std we skip logs for state_machine, this macro +/// is a noops. +#[cfg(not(feature = "std"))] +#[macro_export] +macro_rules! debug { + (target: $target:expr, $($arg:tt)+) => ( + () + ); + ($($arg:tt)+) => ( + () + ); } -/// Like `ExecutionStrategy` only it also stores a handler in case of consensus failure. -#[derive(Clone)] -pub enum ExecutionManager { - /// Execute with the native equivalent if it is compatible with the given wasm module; otherwise fall back to the wasm. - NativeWhenPossible, - /// Use the given wasm module. The backend on which code is executed code could be - /// trusted to provide all storage or not (i.e. the light client cannot be trusted to provide - /// for all storage queries since the storage entries it has come from an external node). - AlwaysWasm(BackendTrustLevel), - /// Run with both the wasm and the native variant (if compatible). Call `F` in the case of any discrepancy. - Both(F), - /// First native, then if that fails or is not possible, wasm. - NativeElseWasm, +/// In no_std we skip logs for state_machine, this macro +/// is a noops. +#[cfg(not(feature = "std"))] +#[macro_export] +macro_rules! 
trace { + (target: $target:expr, $($arg:tt)+) => ( + () + ); + ($($arg:tt)+) => ( + () + ); } -impl<'a, F> From<&'a ExecutionManager> for ExecutionStrategy { - fn from(s: &'a ExecutionManager) -> Self { - match *s { - ExecutionManager::NativeWhenPossible => ExecutionStrategy::NativeWhenPossible, - ExecutionManager::AlwaysWasm(_) => ExecutionStrategy::AlwaysWasm, - ExecutionManager::NativeElseWasm => ExecutionStrategy::NativeElseWasm, - ExecutionManager::Both(_) => ExecutionStrategy::Both, - } - } +/// In no_std we skip logs for state_machine, this macro +/// is a noops. +#[cfg(not(feature = "std"))] +#[macro_export] +macro_rules! log_error { + (target: $target:expr, $($arg:tt)+) => ( + () + ); + ($($arg:tt)+) => ( + () + ); } -impl ExecutionStrategy { - /// Gets the corresponding manager for the execution strategy. - pub fn get_manager( - self, - ) -> ExecutionManager> { - match self { - ExecutionStrategy::AlwaysWasm => ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted), - ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible, - ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm, - ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| { - warn!( - "Consensus error between wasm {:?} and native {:?}. Using wasm.", - wasm_result, - native_result, - ); - warn!(" Native result {:?}", native_result); - warn!(" Wasm result {:?}", wasm_result); - wasm_result - }), - } +/// Default error type to use with state machine trie backend. +#[cfg(feature = "std")] +pub type DefaultError = String; +/// Error type to use with state machine trie backend. +#[cfg(not(feature = "std"))] +#[derive(Debug, Default, Clone, Copy, Eq, PartialEq)] +pub struct DefaultError; + +#[cfg(not(feature = "std"))] +impl sp_std::fmt::Display for DefaultError { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "DefaultError") } } -/// Evaluate to ExecutionManager::NativeElseWasm, without having to figure out the type. -pub fn native_else_wasm() -> ExecutionManager> { - ExecutionManager::NativeElseWasm -} +pub use crate::overlayed_changes::{ + OverlayedChanges, StorageKey, StorageValue, + StorageCollection, ChildStorageCollection, + StorageChanges, StorageTransactionCache, +}; +pub use crate::backend::Backend; +pub use crate::trie_backend_essence::{TrieBackendStorage, Storage}; +pub use crate::trie_backend::TrieBackend; +pub use crate::stats::{UsageInfo, UsageUnit, StateMachineStats}; +pub use error::{Error, ExecutionError}; +pub use crate::ext::Ext; -/// Evaluate to ExecutionManager::AlwaysWasm with trusted backend, without having to figure out the type. -fn always_wasm() -> ExecutionManager> { - ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted) -} +#[cfg(not(feature = "std"))] +mod changes_trie { + /// Stub for change trie block number until + /// change trie move to no_std. + pub trait BlockNumber {} -/// Evaluate ExecutionManager::AlwaysWasm with untrusted backend, without having to figure out the type. -fn always_untrusted_wasm() -> ExecutionManager> { - ExecutionManager::AlwaysWasm(BackendTrustLevel::Untrusted) + impl BlockNumber for N {} } -/// The substrate state machine. 
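The `warn!`/`debug!`/`trace!`/`log_error!` stubs above let existing log call sites compile unchanged when the crate is built without `std`. A stand-alone sketch of the same pattern, assuming a crate that exposes a `std` feature; the helper function below is hypothetical:

// Without `std` the macro expands to `()`, so the call site compiles away and
// the `log` crate is not pulled in at all.
#[cfg(not(feature = "std"))]
#[macro_export]
macro_rules! trace {
    (target: $target:expr, $($arg:tt)+) => { () };
    ($($arg:tt)+) => { () };
}

// With `std` the real `log` macro is re-exported under the same name.
#[cfg(feature = "std")]
pub use log::trace;

// Identical source in both configurations; only the `std` build emits a log line.
pub fn read_hot_path(key: &[u8]) -> usize {
    trace!(target: "state", "reading key {:?}", key);
    key.len()
}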
-pub struct StateMachine<'a, B, H, N, Exec> - where - H: Hasher, - B: Backend, - N: ChangesTrieBlockNumber, -{ - backend: &'a B, - exec: &'a Exec, - method: &'a str, - call_data: &'a [u8], - overlay: &'a mut OverlayedChanges, - offchain_overlay: &'a mut OffchainOverlayedChanges, - extensions: Extensions, - changes_trie_state: Option>, - storage_transaction_cache: Option<&'a mut StorageTransactionCache>, - runtime_code: &'a RuntimeCode<'a>, - stats: StateMachineStats, +#[cfg(feature = "std")] +mod std_reexport { + pub use sp_trie::{trie_types::{Layout, TrieDBMut}, StorageProof, TrieMut, DBValue, MemoryDB}; + pub use crate::testing::TestExternalities; + pub use crate::basic::BasicExternalities; + pub use crate::read_only::{ReadOnlyExternalities, InspectState}; + pub use crate::changes_trie::{ + AnchorBlockId as ChangesTrieAnchorBlockId, + State as ChangesTrieState, + Storage as ChangesTrieStorage, + RootsStorage as ChangesTrieRootsStorage, + InMemoryStorage as InMemoryChangesTrieStorage, + BuildCache as ChangesTrieBuildCache, + CacheAction as ChangesTrieCacheAction, + ConfigurationRange as ChangesTrieConfigurationRange, + key_changes, key_changes_proof, + key_changes_proof_check, key_changes_proof_check_with_db, + prune as prune_changes_tries, + disabled_state as disabled_changes_trie_state, + BlockNumber as ChangesTrieBlockNumber, + }; + pub use crate::proving_backend::{ + create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, + }; + pub use crate::error::{Error, ExecutionError}; + pub use crate::in_memory_backend::new_in_mem; } -impl<'a, B, H, N, Exec> Drop for StateMachine<'a, B, H, N, Exec> where - H: Hasher, - B: Backend, - N: ChangesTrieBlockNumber, -{ - fn drop(&mut self) { - self.backend.register_overlay_stats(&self.stats); +#[cfg(feature = "std")] +mod execution { + use super::*; + use std::{fmt, result, collections::HashMap, panic::UnwindSafe}; + use log::{warn, trace}; + use hash_db::Hasher; + use codec::{Decode, Encode, Codec}; + use sp_core::{ + offchain::storage::OffchainOverlayedChanges, + storage::ChildInfo, NativeOrEncoded, NeverNativeValue, hexdisplay::HexDisplay, + traits::{CodeExecutor, CallInWasmExt, RuntimeCode, SpawnNamed}, + }; + use sp_externalities::Extensions; + + + const PROOF_CLOSE_TRANSACTION: &str = "\ + Closing a transaction that was started in this function. Client initiated transactions + are protected from being closed by the runtime. qed"; + + pub(crate) type CallResult = Result, E>; + + /// Default handler of the execution manager. + pub type DefaultHandler = fn(CallResult, CallResult) -> CallResult; + + /// Type of changes trie transaction. + pub type ChangesTrieTransaction = ( + MemoryDB, + ChangesTrieCacheAction<::Out, N>, + ); + + /// Trie backend with in-memory storage. + pub type InMemoryBackend = TrieBackend, H>; + + /// Strategy for executing a call into the runtime. + #[derive(Copy, Clone, Eq, PartialEq, Debug)] + pub enum ExecutionStrategy { + /// Execute with the native equivalent if it is compatible with the given wasm module; + /// otherwise fall back to the wasm. + NativeWhenPossible, + /// Use the given wasm module. + AlwaysWasm, + /// Run with both the wasm and the native variant (if compatible). Report any discrepancy as an error. + Both, + /// First native, then if that fails or is not possible, wasm. 
+ NativeElseWasm, } -} -impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where - H: Hasher, - H::Out: Ord + 'static + codec::Codec, - Exec: CodeExecutor + Clone + 'static, - B: Backend, - N: crate::changes_trie::BlockNumber, -{ - /// Creates new substrate state machine. - pub fn new( - backend: &'a B, - changes_trie_state: Option>, - overlay: &'a mut OverlayedChanges, - offchain_overlay: &'a mut OffchainOverlayedChanges, - exec: &'a Exec, - method: &'a str, - call_data: &'a [u8], - mut extensions: Extensions, - runtime_code: &'a RuntimeCode, - spawn_handle: impl SpawnNamed + Send + 'static, - ) -> Self { - extensions.register(CallInWasmExt::new(exec.clone())); - extensions.register(sp_core::traits::TaskExecutorExt::new(spawn_handle)); - - Self { - backend, - exec, - method, - call_data, - extensions, - overlay, - offchain_overlay, - changes_trie_state, - storage_transaction_cache: None, - runtime_code, - stats: StateMachineStats::default(), - } + /// Storage backend trust level. + #[derive(Debug, Clone)] + pub enum BackendTrustLevel { + /// Panics from trusted backends are considered justified, and never caught. + Trusted, + /// Panics from untrusted backend are caught and interpreted as runtime error. + /// Untrusted backend may be missing some parts of the trie, so panics are not considered + /// fatal. + Untrusted, } - /// Use given `cache` as storage transaction cache. - /// - /// The cache will be used to cache storage transactions that can be build while executing a - /// function in the runtime. For example, when calculating the storage root a transaction is - /// build that will be cached. - pub fn with_storage_transaction_cache( - mut self, - cache: Option<&'a mut StorageTransactionCache>, - ) -> Self { - self.storage_transaction_cache = cache; - self + /// Like `ExecutionStrategy` only it also stores a handler in case of consensus failure. + #[derive(Clone)] + pub enum ExecutionManager { + /// Execute with the native equivalent if it is compatible with the given wasm module; + /// otherwise fall back to the wasm. + NativeWhenPossible, + /// Use the given wasm module. The backend on which code is executed code could be + /// trusted to provide all storage or not (i.e. the light client cannot be trusted to provide + /// for all storage queries since the storage entries it has come from an external node). + AlwaysWasm(BackendTrustLevel), + /// Run with both the wasm and the native variant (if compatible). Call `F` in the case of any discrepancy. + Both(F), + /// First native, then if that fails or is not possible, wasm. + NativeElseWasm, } - /// Execute a call using the given state backend, overlayed changes, and call executor. - /// - /// On an error, no prospective changes are written to the overlay. - /// - /// Note: changes to code will be in place if this call is made again. For running partial - /// blocks (e.g. a transaction at a time), ensure a different method is used. - /// - /// Returns the SCALE encoded result of the executed function. - pub fn execute(&mut self, strategy: ExecutionStrategy) -> Result, Box> { - // We are not giving a native call and thus we are sure that the result can never be a native - // value. 
- self.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - strategy.get_manager(), - None, - ).map(NativeOrEncoded::into_encoded) + impl<'a, F> From<&'a ExecutionManager> for ExecutionStrategy { + fn from(s: &'a ExecutionManager) -> Self { + match *s { + ExecutionManager::NativeWhenPossible => ExecutionStrategy::NativeWhenPossible, + ExecutionManager::AlwaysWasm(_) => ExecutionStrategy::AlwaysWasm, + ExecutionManager::NativeElseWasm => ExecutionStrategy::NativeElseWasm, + ExecutionManager::Both(_) => ExecutionStrategy::Both, + } + } } - fn execute_aux( - &mut self, - use_native: bool, - native_call: Option, - ) -> ( - CallResult, - bool, - ) where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - { - let mut cache = StorageTransactionCache::default(); + impl ExecutionStrategy { + /// Gets the corresponding manager for the execution strategy. + pub fn get_manager( + self, + ) -> ExecutionManager> { + match self { + ExecutionStrategy::AlwaysWasm => ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted), + ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible, + ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm, + ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| { + warn!( + "Consensus error between wasm {:?} and native {:?}. Using wasm.", + wasm_result, + native_result, + ); + warn!(" Native result {:?}", native_result); + warn!(" Wasm result {:?}", wasm_result); + wasm_result + }), + } + } + } - let cache = match self.storage_transaction_cache.as_mut() { - Some(cache) => cache, - None => &mut cache, - }; + /// Evaluate to ExecutionManager::NativeElseWasm, without having to figure out the type. + pub fn native_else_wasm() -> ExecutionManager> { + ExecutionManager::NativeElseWasm + } - self.overlay.enter_runtime().expect("StateMachine is never called from the runtime; qed"); + /// Evaluate to ExecutionManager::AlwaysWasm with trusted backend, without having to figure out the type. + fn always_wasm() -> ExecutionManager> { + ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted) + } - let mut ext = Ext::new( - self.overlay, - self.offchain_overlay, - cache, - self.backend, - self.changes_trie_state.clone(), - Some(&mut self.extensions), - ); + /// Evaluate ExecutionManager::AlwaysWasm with untrusted backend, without having to figure out the type. + fn always_untrusted_wasm() -> ExecutionManager> { + ExecutionManager::AlwaysWasm(BackendTrustLevel::Untrusted) + } - let id = ext.id; - trace!( - target: "state", "{:04x}: Call {} at {:?}. Input={:?}", - id, - self.method, - self.backend, - HexDisplay::from(&self.call_data), - ); + /// The substrate state machine. 
+ pub struct StateMachine<'a, B, H, N, Exec> + where + H: Hasher, + B: Backend, + N: ChangesTrieBlockNumber, + { + backend: &'a B, + exec: &'a Exec, + method: &'a str, + call_data: &'a [u8], + overlay: &'a mut OverlayedChanges, + offchain_overlay: &'a mut OffchainOverlayedChanges, + extensions: Extensions, + changes_trie_state: Option>, + storage_transaction_cache: Option<&'a mut StorageTransactionCache>, + runtime_code: &'a RuntimeCode<'a>, + stats: StateMachineStats, + } - let (result, was_native) = self.exec.call( - &mut ext, - self.runtime_code, - self.method, - self.call_data, - use_native, - native_call, - ); + impl<'a, B, H, N, Exec> Drop for StateMachine<'a, B, H, N, Exec> where + H: Hasher, + B: Backend, + N: ChangesTrieBlockNumber, + { + fn drop(&mut self) { + self.backend.register_overlay_stats(&self.stats); + } + } - self.overlay.exit_runtime() - .expect("Runtime is not able to call this function in the overlay; qed"); + impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where + H: Hasher, + H::Out: Ord + 'static + codec::Codec, + Exec: CodeExecutor + Clone + 'static, + B: Backend, + N: crate::changes_trie::BlockNumber, + { + /// Creates new substrate state machine. + pub fn new( + backend: &'a B, + changes_trie_state: Option>, + overlay: &'a mut OverlayedChanges, + offchain_overlay: &'a mut OffchainOverlayedChanges, + exec: &'a Exec, + method: &'a str, + call_data: &'a [u8], + mut extensions: Extensions, + runtime_code: &'a RuntimeCode, + spawn_handle: impl SpawnNamed + Send + 'static, + ) -> Self { + extensions.register(CallInWasmExt::new(exec.clone())); + extensions.register(sp_core::traits::TaskExecutorExt::new(spawn_handle)); + + Self { + backend, + exec, + method, + call_data, + extensions, + overlay, + offchain_overlay, + changes_trie_state, + storage_transaction_cache: None, + runtime_code, + stats: StateMachineStats::default(), + } + } - trace!( - target: "state", "{:04x}: Return. Native={:?}, Result={:?}", - id, - was_native, - result, - ); + /// Use given `cache` as storage transaction cache. + /// + /// The cache will be used to cache storage transactions that can be build while executing a + /// function in the runtime. For example, when calculating the storage root a transaction is + /// build that will be cached. + pub fn with_storage_transaction_cache( + mut self, + cache: Option<&'a mut StorageTransactionCache>, + ) -> Self { + self.storage_transaction_cache = cache; + self + } - (result, was_native) - } + /// Execute a call using the given state backend, overlayed changes, and call executor. + /// + /// On an error, no prospective changes are written to the overlay. + /// + /// Note: changes to code will be in place if this call is made again. For running partial + /// blocks (e.g. a transaction at a time), ensure a different method is used. + /// + /// Returns the SCALE encoded result of the executed function. + pub fn execute(&mut self, strategy: ExecutionStrategy) -> Result, Box> { + // We are not giving a native call and thus we are sure that the result can never be a native + // value. 
+ self.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + strategy.get_manager(), + None, + ).map(NativeOrEncoded::into_encoded) + } - fn execute_call_with_both_strategy( - &mut self, - mut native_call: Option, - on_consensus_failure: Handler, - ) -> CallResult - where + fn execute_aux( + &mut self, + use_native: bool, + native_call: Option, + ) -> ( + CallResult, + bool, + ) where R: Decode + Encode + PartialEq, NC: FnOnce() -> result::Result + UnwindSafe, - Handler: FnOnce( - CallResult, - CallResult, - ) -> CallResult - { - self.overlay.start_transaction(); - let (result, was_native) = self.execute_aux(true, native_call.take()); + { + let mut cache = StorageTransactionCache::default(); - if was_native { - self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); - let (wasm_result, _) = self.execute_aux( - false, + let cache = match self.storage_transaction_cache.as_mut() { + Some(cache) => cache, + None => &mut cache, + }; + + self.overlay.enter_runtime().expect("StateMachine is never called from the runtime; qed"); + + let mut ext = Ext::new( + self.overlay, + self.offchain_overlay, + cache, + self.backend, + self.changes_trie_state.clone(), + Some(&mut self.extensions), + ); + + let id = ext.id; + trace!( + target: "state", "{:04x}: Call {} at {:?}. Input={:?}", + id, + self.method, + self.backend, + HexDisplay::from(&self.call_data), + ); + + let (result, was_native) = self.exec.call( + &mut ext, + self.runtime_code, + self.method, + self.call_data, + use_native, native_call, ); - if (result.is_ok() && wasm_result.is_ok() - && result.as_ref().ok() == wasm_result.as_ref().ok()) - || result.is_err() && wasm_result.is_err() - { + self.overlay.exit_runtime() + .expect("Runtime is not able to call this function in the overlay; qed"); + + trace!( + target: "state", "{:04x}: Return. 
Native={:?}, Result={:?}", + id, + was_native, + result, + ); + + (result, was_native) + } + + fn execute_call_with_both_strategy( + &mut self, + mut native_call: Option, + on_consensus_failure: Handler, + ) -> CallResult + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + Handler: FnOnce( + CallResult, + CallResult, + ) -> CallResult + { + self.overlay.start_transaction(); + let (result, was_native) = self.execute_aux(true, native_call.take()); + + if was_native { + self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); + let (wasm_result, _) = self.execute_aux( + false, + native_call, + ); + + if (result.is_ok() && wasm_result.is_ok() + && result.as_ref().ok() == wasm_result.as_ref().ok()) + || result.is_err() && wasm_result.is_err() + { + result + } else { + on_consensus_failure(wasm_result, result) + } + } else { + self.overlay.commit_transaction().expect(PROOF_CLOSE_TRANSACTION); + result + } + } + + fn execute_call_with_native_else_wasm_strategy( + &mut self, + mut native_call: Option, + ) -> CallResult + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + { + self.overlay.start_transaction(); + let (result, was_native) = self.execute_aux( + true, + native_call.take(), + ); + + if !was_native || result.is_ok() { + self.overlay.commit_transaction().expect(PROOF_CLOSE_TRANSACTION); result } else { - on_consensus_failure(wasm_result, result) + self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); + let (wasm_result, _) = self.execute_aux( + false, + native_call, + ); + wasm_result } - } else { - self.overlay.commit_transaction().expect(PROOF_CLOSE_TRANSACTION); - result + } + + /// Execute a call using the given state backend, overlayed changes, and call executor. + /// + /// On an error, no prospective changes are written to the overlay. + /// + /// Note: changes to code will be in place if this call is made again. For running partial + /// blocks (e.g. a transaction at a time), ensure a different method is used. + /// + /// Returns the result of the executed function either in native representation `R` or + /// in SCALE encoded representation. 
+ pub fn execute_using_consensus_failure_handler( + &mut self, + manager: ExecutionManager, + mut native_call: Option, + ) -> Result, Box> + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + Handler: FnOnce( + CallResult, + CallResult, + ) -> CallResult + { + let changes_tries_enabled = self.changes_trie_state.is_some(); + self.overlay.set_collect_extrinsics(changes_tries_enabled); + + let result = { + match manager { + ExecutionManager::Both(on_consensus_failure) => { + self.execute_call_with_both_strategy( + native_call.take(), + on_consensus_failure, + ) + }, + ExecutionManager::NativeElseWasm => { + self.execute_call_with_native_else_wasm_strategy( + native_call.take(), + ) + }, + ExecutionManager::AlwaysWasm(trust_level) => { + let _abort_guard = match trust_level { + BackendTrustLevel::Trusted => None, + BackendTrustLevel::Untrusted => Some(sp_panic_handler::AbortGuard::never_abort()), + }; + self.execute_aux(false, native_call).0 + }, + ExecutionManager::NativeWhenPossible => { + self.execute_aux(true, native_call).0 + }, + } + }; + + result.map_err(|e| Box::new(e) as _) } } - fn execute_call_with_native_else_wasm_strategy( - &mut self, - mut native_call: Option, - ) -> CallResult - where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + /// Prove execution using the given state backend, overlayed changes, and call executor. + pub fn prove_execution( + mut backend: B, + overlay: &mut OverlayedChanges, + exec: &Exec, + spawn_handle: Spawn, + method: &str, + call_data: &[u8], + runtime_code: &RuntimeCode, + ) -> Result<(Vec, StorageProof), Box> + where + B: Backend, + H: Hasher, + H::Out: Ord + 'static + codec::Codec, + Exec: CodeExecutor + Clone + 'static, + N: crate::changes_trie::BlockNumber, + Spawn: SpawnNamed + Send + 'static, { - self.overlay.start_transaction(); - let (result, was_native) = self.execute_aux( - true, - native_call.take(), - ); - - if !was_native || result.is_ok() { - self.overlay.commit_transaction().expect(PROOF_CLOSE_TRANSACTION); - result - } else { - self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); - let (wasm_result, _) = self.execute_aux( - false, - native_call, - ); - wasm_result - } + let trie_backend = backend.as_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + prove_execution_on_trie_backend::<_, _, N, _, _>( + trie_backend, + overlay, + exec, + spawn_handle, + method, + call_data, + runtime_code, + ) } - /// Execute a call using the given state backend, overlayed changes, and call executor. + /// Prove execution using the given trie backend, overlayed changes, and call executor. + /// Produces a state-backend-specific "transaction" which can be used to apply the changes + /// to the backing store, such as the disk. + /// Execution proof is the set of all 'touched' storage DBValues from the backend. /// /// On an error, no prospective changes are written to the overlay. /// /// Note: changes to code will be in place if this call is made again. For running partial /// blocks (e.g. a transaction at a time), ensure a different method is used. - /// - /// Returns the result of the executed function either in native representation `R` or - /// in SCALE encoded representation. 
- pub fn execute_using_consensus_failure_handler( - &mut self, - manager: ExecutionManager, - mut native_call: Option, - ) -> Result, Box> - where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - Handler: FnOnce( - CallResult, - CallResult, - ) -> CallResult + pub fn prove_execution_on_trie_backend( + trie_backend: &TrieBackend, + overlay: &mut OverlayedChanges, + exec: &Exec, + spawn_handle: Spawn, + method: &str, + call_data: &[u8], + runtime_code: &RuntimeCode, + ) -> Result<(Vec, StorageProof), Box> + where + S: trie_backend_essence::TrieBackendStorage, + H: Hasher, + H::Out: Ord + 'static + codec::Codec, + Exec: CodeExecutor + 'static + Clone, + N: crate::changes_trie::BlockNumber, + Spawn: SpawnNamed + Send + 'static, { - let changes_tries_enabled = self.changes_trie_state.is_some(); - self.overlay.set_collect_extrinsics(changes_tries_enabled); - - let result = { - match manager { - ExecutionManager::Both(on_consensus_failure) => { - self.execute_call_with_both_strategy( - native_call.take(), - on_consensus_failure, - ) - }, - ExecutionManager::NativeElseWasm => { - self.execute_call_with_native_else_wasm_strategy( - native_call.take(), - ) - }, - ExecutionManager::AlwaysWasm(trust_level) => { - let _abort_guard = match trust_level { - BackendTrustLevel::Trusted => None, - BackendTrustLevel::Untrusted => Some(sp_panic_handler::AbortGuard::never_abort()), - }; - self.execute_aux(false, native_call).0 - }, - ExecutionManager::NativeWhenPossible => { - self.execute_aux(true, native_call).0 - }, - } - }; + let mut offchain_overlay = OffchainOverlayedChanges::default(); + let proving_backend = proving_backend::ProvingBackend::new(trie_backend); + let mut sm = StateMachine::<_, H, N, Exec>::new( + &proving_backend, + None, + overlay, + &mut offchain_overlay, + exec, + method, + call_data, + Extensions::default(), + runtime_code, + spawn_handle, + ); - result.map_err(|e| Box::new(e) as _) + let result = sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + always_wasm(), + None, + )?; + let proof = sm.backend.extract_proof(); + Ok((result.into_encoded(), proof)) } -} -/// Prove execution using the given state backend, overlayed changes, and call executor. -pub fn prove_execution( - mut backend: B, - overlay: &mut OverlayedChanges, - exec: &Exec, - spawn_handle: Spawn, - method: &str, - call_data: &[u8], - runtime_code: &RuntimeCode, -) -> Result<(Vec, StorageProof), Box> -where - B: Backend, - H: Hasher, - H::Out: Ord + 'static + codec::Codec, - Exec: CodeExecutor + Clone + 'static, - N: crate::changes_trie::BlockNumber, - Spawn: SpawnNamed + Send + 'static, -{ - let trie_backend = backend.as_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_execution_on_trie_backend::<_, _, N, _, _>( - trie_backend, - overlay, - exec, - spawn_handle, - method, - call_data, - runtime_code, - ) -} - -/// Prove execution using the given trie backend, overlayed changes, and call executor. -/// Produces a state-backend-specific "transaction" which can be used to apply the changes -/// to the backing store, such as the disk. -/// Execution proof is the set of all 'touched' storage DBValues from the backend. -/// -/// On an error, no prospective changes are written to the overlay. -/// -/// Note: changes to code will be in place if this call is made again. For running partial -/// blocks (e.g. a transaction at a time), ensure a different method is used. 
-pub fn prove_execution_on_trie_backend( - trie_backend: &TrieBackend, - overlay: &mut OverlayedChanges, - exec: &Exec, - spawn_handle: Spawn, - method: &str, - call_data: &[u8], - runtime_code: &RuntimeCode, -) -> Result<(Vec, StorageProof), Box> -where - S: trie_backend_essence::TrieBackendStorage, - H: Hasher, - H::Out: Ord + 'static + codec::Codec, - Exec: CodeExecutor + 'static + Clone, - N: crate::changes_trie::BlockNumber, - Spawn: SpawnNamed + Send + 'static, -{ - let mut offchain_overlay = OffchainOverlayedChanges::default(); - let proving_backend = proving_backend::ProvingBackend::new(trie_backend); - let mut sm = StateMachine::<_, H, N, Exec>::new( - &proving_backend, - None, - overlay, - &mut offchain_overlay, - exec, - method, - call_data, - Extensions::default(), - runtime_code, - spawn_handle, - ); - - let result = sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - always_wasm(), - None, - )?; - let proof = sm.backend.extract_proof(); - Ok((result.into_encoded(), proof)) -} - -/// Check execution proof, generated by `prove_execution` call. -pub fn execution_proof_check( - root: H::Out, - proof: StorageProof, - overlay: &mut OverlayedChanges, - exec: &Exec, - spawn_handle: Spawn, - method: &str, - call_data: &[u8], - runtime_code: &RuntimeCode, -) -> Result, Box> -where - H: Hasher, - Exec: CodeExecutor + Clone + 'static, - H::Out: Ord + 'static + codec::Codec, - N: crate::changes_trie::BlockNumber, - Spawn: SpawnNamed + Send + 'static, -{ - let trie_backend = create_proof_check_backend::(root.into(), proof)?; - execution_proof_check_on_trie_backend::<_, N, _, _>( - &trie_backend, - overlay, - exec, - spawn_handle, - method, - call_data, - runtime_code, - ) -} + /// Check execution proof, generated by `prove_execution` call. + pub fn execution_proof_check( + root: H::Out, + proof: StorageProof, + overlay: &mut OverlayedChanges, + exec: &Exec, + spawn_handle: Spawn, + method: &str, + call_data: &[u8], + runtime_code: &RuntimeCode, + ) -> Result, Box> + where + H: Hasher, + Exec: CodeExecutor + Clone + 'static, + H::Out: Ord + 'static + codec::Codec, + N: crate::changes_trie::BlockNumber, + Spawn: SpawnNamed + Send + 'static, + { + let trie_backend = create_proof_check_backend::(root.into(), proof)?; + execution_proof_check_on_trie_backend::<_, N, _, _>( + &trie_backend, + overlay, + exec, + spawn_handle, + method, + call_data, + runtime_code, + ) + } -/// Check execution proof on proving backend, generated by `prove_execution` call. -pub fn execution_proof_check_on_trie_backend( - trie_backend: &TrieBackend, H>, - overlay: &mut OverlayedChanges, - exec: &Exec, - spawn_handle: Spawn, - method: &str, - call_data: &[u8], - runtime_code: &RuntimeCode, -) -> Result, Box> -where - H: Hasher, - H::Out: Ord + 'static + codec::Codec, - Exec: CodeExecutor + Clone + 'static, - N: crate::changes_trie::BlockNumber, - Spawn: SpawnNamed + Send + 'static, -{ - let mut offchain_overlay = OffchainOverlayedChanges::default(); - let mut sm = StateMachine::<_, H, N, Exec>::new( - trie_backend, - None, - overlay, - &mut offchain_overlay, - exec, - method, - call_data, - Extensions::default(), - runtime_code, - spawn_handle, - ); + /// Check execution proof on proving backend, generated by `prove_execution` call. 
+ pub fn execution_proof_check_on_trie_backend( + trie_backend: &TrieBackend, H>, + overlay: &mut OverlayedChanges, + exec: &Exec, + spawn_handle: Spawn, + method: &str, + call_data: &[u8], + runtime_code: &RuntimeCode, + ) -> Result, Box> + where + H: Hasher, + H::Out: Ord + 'static + codec::Codec, + Exec: CodeExecutor + Clone + 'static, + N: crate::changes_trie::BlockNumber, + Spawn: SpawnNamed + Send + 'static, + { + let mut offchain_overlay = OffchainOverlayedChanges::default(); + let mut sm = StateMachine::<_, H, N, Exec>::new( + trie_backend, + None, + overlay, + &mut offchain_overlay, + exec, + method, + call_data, + Extensions::default(), + runtime_code, + spawn_handle, + ); - sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - always_untrusted_wasm(), - None, - ).map(NativeOrEncoded::into_encoded) -} + sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + always_untrusted_wasm(), + None, + ).map(NativeOrEncoded::into_encoded) + } -/// Generate storage read proof. -pub fn prove_read( - mut backend: B, - keys: I, -) -> Result> -where - B: Backend, - H: Hasher, - H::Out: Ord + Codec, - I: IntoIterator, - I::Item: AsRef<[u8]>, -{ - let trie_backend = backend.as_trie_backend() - .ok_or_else( - || Box::new(ExecutionError::UnableToGenerateProof) as Box - )?; - prove_read_on_trie_backend(trie_backend, keys) -} + /// Generate storage read proof. + pub fn prove_read( + mut backend: B, + keys: I, + ) -> Result> + where + B: Backend, + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, + { + let trie_backend = backend.as_trie_backend() + .ok_or_else( + || Box::new(ExecutionError::UnableToGenerateProof) as Box + )?; + prove_read_on_trie_backend(trie_backend, keys) + } -/// Generate child storage read proof. -pub fn prove_child_read( - mut backend: B, - child_info: &ChildInfo, - keys: I, -) -> Result> -where - B: Backend, - H: Hasher, - H::Out: Ord + Codec, - I: IntoIterator, - I::Item: AsRef<[u8]>, -{ - let trie_backend = backend.as_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_child_read_on_trie_backend(trie_backend, child_info, keys) -} + /// Generate child storage read proof. + pub fn prove_child_read( + mut backend: B, + child_info: &ChildInfo, + keys: I, + ) -> Result> + where + B: Backend, + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, + { + let trie_backend = backend.as_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + prove_child_read_on_trie_backend(trie_backend, child_info, keys) + } -/// Generate storage read proof on pre-created trie backend. -pub fn prove_read_on_trie_backend( - trie_backend: &TrieBackend, - keys: I, -) -> Result> -where - S: trie_backend_essence::TrieBackendStorage, - H: Hasher, - H::Out: Ord + Codec, - I: IntoIterator, - I::Item: AsRef<[u8]>, -{ - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); - for key in keys.into_iter() { - proving_backend - .storage(key.as_ref()) - .map_err(|e| Box::new(e) as Box)?; + /// Generate storage read proof on pre-created trie backend. 
+ pub fn prove_read_on_trie_backend( + trie_backend: &TrieBackend, + keys: I, + ) -> Result> + where + S: trie_backend_essence::TrieBackendStorage, + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, + { + let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); + for key in keys.into_iter() { + proving_backend + .storage(key.as_ref()) + .map_err(|e| Box::new(e) as Box)?; + } + Ok(proving_backend.extract_proof()) } - Ok(proving_backend.extract_proof()) -} -/// Generate storage read proof on pre-created trie backend. -pub fn prove_child_read_on_trie_backend( - trie_backend: &TrieBackend, - child_info: &ChildInfo, - keys: I, -) -> Result> -where - S: trie_backend_essence::TrieBackendStorage, - H: Hasher, - H::Out: Ord + Codec, - I: IntoIterator, - I::Item: AsRef<[u8]>, -{ - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); - for key in keys.into_iter() { - proving_backend - .child_storage(child_info, key.as_ref()) - .map_err(|e| Box::new(e) as Box)?; + /// Generate storage read proof on pre-created trie backend. + pub fn prove_child_read_on_trie_backend( + trie_backend: &TrieBackend, + child_info: &ChildInfo, + keys: I, + ) -> Result> + where + S: trie_backend_essence::TrieBackendStorage, + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, + { + let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); + for key in keys.into_iter() { + proving_backend + .child_storage(child_info, key.as_ref()) + .map_err(|e| Box::new(e) as Box)?; + } + Ok(proving_backend.extract_proof()) } - Ok(proving_backend.extract_proof()) -} -/// Check storage read proof, generated by `prove_read` call. -pub fn read_proof_check( - root: H::Out, - proof: StorageProof, - keys: I, -) -> Result, Option>>, Box> -where - H: Hasher, - H::Out: Ord + Codec, - I: IntoIterator, - I::Item: AsRef<[u8]>, -{ - let proving_backend = create_proof_check_backend::(root, proof)?; - let mut result = HashMap::new(); - for key in keys.into_iter() { - let value = read_proof_check_on_proving_backend(&proving_backend, key.as_ref())?; - result.insert(key.as_ref().to_vec(), value); + /// Check storage read proof, generated by `prove_read` call. + pub fn read_proof_check( + root: H::Out, + proof: StorageProof, + keys: I, + ) -> Result, Option>>, Box> + where + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, + { + let proving_backend = create_proof_check_backend::(root, proof)?; + let mut result = HashMap::new(); + for key in keys.into_iter() { + let value = read_proof_check_on_proving_backend(&proving_backend, key.as_ref())?; + result.insert(key.as_ref().to_vec(), value); + } + Ok(result) } - Ok(result) -} -/// Check child storage read proof, generated by `prove_child_read` call. -pub fn read_child_proof_check( - root: H::Out, - proof: StorageProof, - child_info: &ChildInfo, - keys: I, -) -> Result, Option>>, Box> -where - H: Hasher, - H::Out: Ord + Codec, - I: IntoIterator, - I::Item: AsRef<[u8]>, -{ - let proving_backend = create_proof_check_backend::(root, proof)?; - let mut result = HashMap::new(); - for key in keys.into_iter() { - let value = read_child_proof_check_on_proving_backend( - &proving_backend, - child_info, - key.as_ref(), - )?; - result.insert(key.as_ref().to_vec(), value); + /// Check child storage read proof, generated by `prove_child_read` call. 
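For the storage read proofs above, the checking functions return one entry per requested key: `Some(value)` or `None` when absence itself is proven, while a key the proof does not cover at all fails the whole check. The snippet below only models that result shape, with a plain map as the witness instead of the real trie nodes; it is illustrative, not the sp-state-machine implementation.

```rust
use std::collections::HashMap;

/// Recorded witness: every key the prover touched, with its value
/// (`None` meaning "proven absent"). A real `StorageProof` stores trie nodes instead.
type Witness = HashMap<Vec<u8>, Option<Vec<u8>>>;

/// Mirrors the shape of `read_proof_check`: values for covered keys, error otherwise.
fn read_proof_check(
    witness: &Witness,
    keys: &[Vec<u8>],
) -> Result<HashMap<Vec<u8>, Option<Vec<u8>>>, String> {
    let mut result = HashMap::new();
    for key in keys {
        let value = witness
            .get(key)
            .cloned()
            .ok_or_else(|| format!("key {:?} is not covered by the proof", key))?;
        result.insert(key.clone(), value);
    }
    Ok(result)
}

fn main() {
    let mut witness = Witness::new();
    witness.insert(b"a".to_vec(), Some(b"1".to_vec()));
    witness.insert(b"b".to_vec(), None); // proven absent

    let ok = read_proof_check(&witness, &[b"a".to_vec(), b"b".to_vec()]).unwrap();
    assert_eq!(ok[&b"a".to_vec()], Some(b"1".to_vec()));
    assert_eq!(ok[&b"b".to_vec()], None);

    // A key the prover never touched cannot be answered from the proof.
    assert!(read_proof_check(&witness, &[b"c".to_vec()]).is_err());
}
```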
+ pub fn read_child_proof_check( + root: H::Out, + proof: StorageProof, + child_info: &ChildInfo, + keys: I, + ) -> Result, Option>>, Box> + where + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, + { + let proving_backend = create_proof_check_backend::(root, proof)?; + let mut result = HashMap::new(); + for key in keys.into_iter() { + let value = read_child_proof_check_on_proving_backend( + &proving_backend, + child_info, + key.as_ref(), + )?; + result.insert(key.as_ref().to_vec(), value); + } + Ok(result) } - Ok(result) -} -/// Check storage read proof on pre-created proving backend. -pub fn read_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, - key: &[u8], -) -> Result>, Box> -where - H: Hasher, - H::Out: Ord + Codec, -{ - proving_backend.storage(key).map_err(|e| Box::new(e) as Box) -} + /// Check storage read proof on pre-created proving backend. + pub fn read_proof_check_on_proving_backend( + proving_backend: &TrieBackend, H>, + key: &[u8], + ) -> Result>, Box> + where + H: Hasher, + H::Out: Ord + Codec, + { + proving_backend.storage(key).map_err(|e| Box::new(e) as Box) + } -/// Check child storage read proof on pre-created proving backend. -pub fn read_child_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, - child_info: &ChildInfo, - key: &[u8], -) -> Result>, Box> -where - H: Hasher, - H::Out: Ord + Codec, -{ - proving_backend.child_storage(child_info, key) - .map_err(|e| Box::new(e) as Box) + /// Check child storage read proof on pre-created proving backend. + pub fn read_child_proof_check_on_proving_backend( + proving_backend: &TrieBackend, H>, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Box> + where + H: Hasher, + H::Out: Ord + Codec, + { + proving_backend.child_storage(child_info, key) + .map_err(|e| Box::new(e) as Box) + } } #[cfg(test)] @@ -772,6 +876,15 @@ mod tests { map, traits::{Externalities, RuntimeCode}, testing::TaskExecutor, }; use sp_runtime::traits::BlakeTwo256; + use std::{result, collections::HashMap}; + use codec::Decode; + use sp_core::{ + offchain::storage::OffchainOverlayedChanges, + storage::ChildInfo, NativeOrEncoded, NeverNativeValue, + traits::CodeExecutor, + }; + use crate::execution::CallResult; + #[derive(Clone)] struct DummyCodeExecutor { diff --git a/primitives/state-machine/src/overlayed_changes/changeset.rs b/primitives/state-machine/src/overlayed_changes/changeset.rs index fe43c0ea99d89dcd563e897f7cf997962376f0ed..5e4fd77c685631f987e6fc7471d0878d82798f60 100644 --- a/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -17,18 +17,22 @@ //! Houses the code that implements the transactional overlay storage. 
-use super::{StorageKey, StorageValue}; +use super::{StorageKey, StorageValue, Extrinsics}; -use itertools::Itertools; -use std::collections::{HashSet, BTreeMap, BTreeSet}; +#[cfg(feature = "std")] +use std::collections::HashSet as Set; +#[cfg(not(feature = "std"))] +use sp_std::collections::btree_set::BTreeSet as Set; + +use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use smallvec::SmallVec; -use log::warn; +use crate::warn; const PROOF_OVERLAY_NON_EMPTY: &str = "\ An OverlayValue is always created with at least one transaction and dropped as soon as the last transaction is removed; qed"; -type DirtyKeysSets = SmallVec<[HashSet; 5]>; +type DirtyKeysSets = SmallVec<[Set; 5]>; type Transactions = SmallVec<[InnerValue; 5]>; /// Error returned when trying to commit or rollback while no transaction is open or @@ -63,7 +67,7 @@ struct InnerValue { value: Option, /// The set of extrinsic indices where the values has been changed. /// Is filled only if runtime has announced changes trie support. - extrinsics: BTreeSet, + extrinsics: Extrinsics, } /// An overlay that contains all versions of a value for a specific key. @@ -105,8 +109,10 @@ impl OverlayedValue { } /// Unique list of extrinsic indices which modified the value. - pub fn extrinsics(&self) -> impl Iterator { - self.transactions.iter().flat_map(|t| t.extrinsics.iter()).unique() + pub fn extrinsics(&self) -> BTreeSet { + let mut set = BTreeSet::new(); + self.transactions.iter().for_each(|t| t.extrinsics.copy_extrinsics_into(&mut set)); + set } /// Mutable reference to the most recent version. @@ -120,7 +126,7 @@ impl OverlayedValue { } /// Mutable reference to the set which holds the indices for the **current transaction only**. - fn transaction_extrinsics_mut(&mut self) -> &mut BTreeSet { + fn transaction_extrinsics_mut(&mut self) -> &mut Extrinsics { &mut self.transactions.last_mut().expect(PROOF_OVERLAY_NON_EMPTY).extrinsics } @@ -163,9 +169,9 @@ impl OverlayedChangeSet { /// This changeset might be created when there are already open transactions. /// We need to catch up here so that the child is at the same transaction depth. pub fn spawn_child(&self) -> Self { - use std::iter::repeat; + use sp_std::iter::repeat; Self { - dirty_keys: repeat(HashSet::new()).take(self.transaction_depth()).collect(), + dirty_keys: repeat(Set::new()).take(self.transaction_depth()).collect(), num_client_transactions: self.num_client_transactions, execution_mode: self.execution_mode, .. Default::default() @@ -232,7 +238,7 @@ impl OverlayedChangeSet { at_extrinsic: Option, ) { for (key, val) in self.changes.iter_mut().filter(|(k, v)| predicate(k, v)) { - val.set(None, insert_dirty(&mut self.dirty_keys, key.to_owned()), at_extrinsic); + val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic); } } @@ -243,7 +249,7 @@ impl OverlayedChangeSet { /// Get the change that is next to the supplied key. 
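The `HashSet`-to-`Set` alias swap in the changeset above is the recurring std/no_std trick in this patch: keep one name for the collection so the rest of the module compiles unchanged in both worlds. Here is a stand-alone sketch of the pattern; the `std` feature name matches the crate's, and a plain binary build simply takes the no_std branch, which still works because `alloc` is available alongside `std`.

```rust
#[cfg(not(feature = "std"))]
extern crate alloc;

#[cfg(feature = "std")]
use std::collections::HashSet as Set;
#[cfg(not(feature = "std"))]
use alloc::collections::BTreeSet as Set;

/// Keys dirtied by the current transaction; identical code for either backing set.
#[derive(Default)]
struct DirtyKeys(Set<Vec<u8>>);

impl DirtyKeys {
    /// Returns `true` the first time a key is marked.
    fn mark(&mut self, key: &[u8]) -> bool {
        self.0.insert(key.to_vec())
    }
}

fn main() {
    let mut dirty = DirtyKeys::default();
    assert!(dirty.mark(b"balance"));
    assert!(!dirty.mark(b"balance")); // already dirty
}
```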
pub fn next_change(&self, key: &[u8]) -> Option<(&[u8], &OverlayedValue)> { - use std::ops::Bound; + use sp_std::ops::Bound; let range = (Bound::Excluded(key), Bound::Unbounded); self.changes.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v)) } @@ -388,7 +394,7 @@ mod test { fn assert_changes(is: &OverlayedChangeSet, expected: &Changes) { let is: Changes = is.changes().map(|(k, v)| { - (k.as_ref(), (v.value().map(AsRef::as_ref), v.extrinsics().cloned().collect())) + (k.as_ref(), (v.value().map(AsRef::as_ref), v.extrinsics().into_iter().collect())) }).collect(); assert_eq!(&is, expected); } diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 9a2b1c41973100682f1cc8a2aad94908f1cecbe7..992f7b3519299964455141ff7c7d4d38dcc522c2 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -20,23 +20,38 @@ mod changeset; use crate::{ - backend::Backend, ChangesTrieTransaction, - changes_trie::{ - NO_EXTRINSIC_INDEX, BlockNumber, build_changes_trie, - State as ChangesTrieState, - }, + backend::Backend, stats::StateMachineStats, }; +use sp_std::vec::Vec; use self::changeset::OverlayedChangeSet; -use std::collections::HashMap; +#[cfg(feature = "std")] +use crate::{ + ChangesTrieTransaction, + changes_trie::{ + build_changes_trie, + State as ChangesTrieState, + }, +}; +use crate::changes_trie::BlockNumber; +#[cfg(feature = "std")] +use std::collections::HashMap as Map; +#[cfg(not(feature = "std"))] +use sp_std::collections::btree_map::BTreeMap as Map; +use sp_std::collections::btree_set::BTreeSet; use codec::{Decode, Encode}; use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}; +#[cfg(feature = "std")] use sp_core::offchain::storage::OffchainOverlayedChanges; use hash_db::Hasher; +use crate::DefaultError; pub use self::changeset::{OverlayedValue, NoOpenTransaction, AlreadyInRuntime, NotInRuntime}; +/// Changes that are made outside of extrinsics are marked with this index; +pub const NO_EXTRINSIC_INDEX: u32 = 0xffffffff; + /// Storage key. pub type StorageKey = Vec; @@ -49,6 +64,29 @@ pub type StorageCollection = Vec<(StorageKey, Option)>; /// In memory arrays of storage values for multiple child tries. pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection)>; +/// Keep trace of extrinsics index for a modified value. +#[derive(Debug, Default, Eq, PartialEq, Clone)] +pub struct Extrinsics(Vec); + +impl Extrinsics { + /// Extracts extrinsics into a `BTreeSets`. + fn copy_extrinsics_into(&self, dest: &mut BTreeSet) { + dest.extend(self.0.iter()) + } + + /// Add an extrinsics. + fn insert(&mut self, ext: u32) { + if Some(&ext) != self.0.last() { + self.0.push(ext); + } + } + + /// Extend `self` with `other`. + fn extend(&mut self, other: Self) { + self.0.extend(other.0.into_iter()); + } +} + /// The set of changes that are overlaid onto the backend. /// /// It allows changes to be modified using nestable transactions. @@ -57,7 +95,7 @@ pub struct OverlayedChanges { /// Top level storage changes. top: OverlayedChangeSet, /// Child storage changes. The map key is the child storage key without the common prefix. - children: HashMap, + children: Map, /// True if extrinsics stats must be collected. collect_extrinsics: bool, /// Collect statistic on this execution. @@ -76,6 +114,7 @@ pub struct StorageChanges { /// All changes to the child storages. 
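The new `Extrinsics` wrapper replaces the old `BTreeSet<u32>` per transaction: inserts skip consecutive duplicates (the common case while one extrinsic keeps touching the same key), and full de-duplication is deferred to `copy_extrinsics_into`. The stand-alone restatement below exists only to show that behaviour; it mirrors the type introduced above rather than adding anything new.

```rust
use std::collections::BTreeSet;

/// A plain `Vec<u32>` of extrinsic indices that skips consecutive repeats on insert
/// and is flattened into an ordered, de-duplicated set on read.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
struct Extrinsics(Vec<u32>);

impl Extrinsics {
    /// Record an extrinsic index; a repeat of the last recorded index is a no-op.
    fn insert(&mut self, ext: u32) {
        if Some(&ext) != self.0.last() {
            self.0.push(ext);
        }
    }

    /// Copy all recorded indices into a de-duplicating, ordered set.
    fn copy_extrinsics_into(&self, dest: &mut BTreeSet<u32>) {
        dest.extend(self.0.iter())
    }
}

fn main() {
    let mut ext = Extrinsics::default();
    for idx in [1, 1, 1, 2, 2, 1] {
        ext.insert(idx);
    }
    // Only consecutive duplicates are dropped on insert...
    assert_eq!(ext.0, vec![1, 2, 1]);

    // ...full de-duplication and ordering happen when collecting into the set.
    let mut set = BTreeSet::new();
    ext.copy_extrinsics_into(&mut set);
    assert_eq!(set.into_iter().collect::<Vec<_>>(), vec![1, 2]);
}
```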
pub child_storage_changes: ChildStorageCollection, /// Offchain state changes to write to the offchain database. + #[cfg(feature = "std")] pub offchain_storage_changes: OffchainOverlayedChanges, /// A transaction for the backend that contains all changes from /// [`main_storage_changes`](StorageChanges::main_storage_changes) and from @@ -87,9 +126,14 @@ pub struct StorageChanges { /// Contains the transaction for the backend for the changes trie. /// /// If changes trie is disabled the value is set to `None`. + #[cfg(feature = "std")] pub changes_trie_transaction: Option>, + /// Phantom data for block number until change trie support no_std. + #[cfg(not(feature = "std"))] + pub _ph: sp_std::marker::PhantomData, } +#[cfg(feature = "std")] impl StorageChanges { /// Deconstruct into the inner values pub fn into_inner(self) -> ( @@ -120,9 +164,14 @@ pub struct StorageTransactionCache { /// The storage root after applying the transaction. pub(crate) transaction_storage_root: Option, /// Contains the changes trie transaction. + #[cfg(feature = "std")] pub(crate) changes_trie_transaction: Option>>, /// The storage root after applying the changes trie transaction. + #[cfg(feature = "std")] pub(crate) changes_trie_transaction_storage_root: Option>, + /// Phantom data for block number until change trie support no_std. + #[cfg(not(feature = "std"))] + pub(crate) _ph: sp_std::marker::PhantomData, } impl StorageTransactionCache { @@ -137,8 +186,12 @@ impl Default for StorageTransactionCache Self { transaction: None, transaction_storage_root: None, + #[cfg(feature = "std")] changes_trie_transaction: None, + #[cfg(feature = "std")] changes_trie_transaction_storage_root: None, + #[cfg(not(feature = "std"))] + _ph: Default::default(), } } } @@ -148,10 +201,14 @@ impl Default for StorageChanges Self { main_storage_changes: Default::default(), child_storage_changes: Default::default(), + #[cfg(feature = "std")] offchain_storage_changes: Default::default(), transaction: Default::default(), transaction_storage_root: Default::default(), + #[cfg(feature = "std")] changes_trie_transaction: None, + #[cfg(not(feature = "std"))] + _ph: Default::default(), } } } @@ -190,7 +247,7 @@ impl OverlayedChanges { key: &[u8], init: impl Fn() -> StorageValue, ) -> &mut StorageValue { - let value = self.top.modify(key.to_owned(), init, self.extrinsic_index()); + let value = self.top.modify(key.to_vec(), init, self.extrinsic_index()); // if the value was deleted initialise it back with an empty vec value.get_or_insert_with(StorageValue::default) @@ -235,7 +292,7 @@ impl OverlayedChanges { let (changeset, info) = self.children.entry(storage_key).or_insert_with(|| ( top.spawn_child(), - child_info.to_owned() + child_info.clone() ) ); let updatable = info.try_update(child_info); @@ -256,7 +313,7 @@ impl OverlayedChanges { let (changeset, info) = self.children.entry(storage_key).or_insert_with(|| ( top.spawn_child(), - child_info.to_owned() + child_info.clone() ) ); let updatable = info.try_update(child_info); @@ -285,7 +342,7 @@ impl OverlayedChanges { let (changeset, info) = self.children.entry(storage_key).or_insert_with(|| ( top.spawn_child(), - child_info.to_owned() + child_info.clone() ) ); let updatable = info.try_update(child_info); @@ -322,7 +379,7 @@ impl OverlayedChanges { /// there is no open transaction that can be rolled back. 
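`StorageChanges` and `StorageTransactionCache` above use another pattern worth calling out: std-only fields are cfg-gated away, and a `PhantomData` keeps the block-number parameter `N` in use for the no_std layout. The sketch below reproduces just that shape with simplified stand-in field types; the feature name follows the crate's convention.

```rust
use std::marker::PhantomData; // `sp_std::marker::PhantomData` in the real crate

/// A changes container whose changes-trie part only exists with std.
pub struct Changes<Transaction, N> {
    /// Present in every build.
    pub transaction: Transaction,
    /// Only buildable with std, where the changes trie is computed.
    #[cfg(feature = "std")]
    pub changes_trie_transaction: Option<(N, Vec<u8>)>,
    /// Keeps `N` used when the std-only field is compiled out.
    #[cfg(not(feature = "std"))]
    pub _ph: PhantomData<N>,
}

impl<Transaction: Default, N> Default for Changes<Transaction, N> {
    fn default() -> Self {
        Self {
            transaction: Default::default(),
            #[cfg(feature = "std")]
            changes_trie_transaction: None,
            #[cfg(not(feature = "std"))]
            _ph: PhantomData,
        }
    }
}

fn main() {
    let changes: Changes<Vec<u8>, u64> = Default::default();
    assert!(changes.transaction.is_empty());
}
```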
pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> { self.top.rollback_transaction()?; - self.children.retain(|_, (changeset, _)| { + retain_map(&mut self.children, |_, (changeset, _)| { changeset.rollback_transaction() .expect("Top and children changesets are started in lockstep; qed"); !changeset.is_empty() @@ -379,7 +436,7 @@ impl OverlayedChanges { impl Iterator)>, impl Iterator)>, ChildInfo))>, ) { - use std::mem::take; + use sp_std::mem::take; ( take(&mut self.top).drain_commited(), take(&mut self.children).into_iter() @@ -409,6 +466,7 @@ impl OverlayedChanges { } /// Convert this instance with all changes into a [`StorageChanges`] instance. + #[cfg(feature = "std")] pub fn into_storage_changes< B: Backend, H: Hasher, N: BlockNumber >( @@ -417,7 +475,8 @@ impl OverlayedChanges { changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, mut cache: StorageTransactionCache, - ) -> Result, String> where H::Out: Ord + Encode + 'static { + ) -> Result, DefaultError> + where H::Out: Ord + Encode + 'static { self.drain_storage_changes(backend, changes_trie_state, parent_hash, &mut cache) } @@ -425,10 +484,12 @@ impl OverlayedChanges { pub fn drain_storage_changes, H: Hasher, N: BlockNumber>( &mut self, backend: &B, + #[cfg(feature = "std")] changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, mut cache: &mut StorageTransactionCache, - ) -> Result, String> where H::Out: Ord + Encode + 'static { + ) -> Result, DefaultError> + where H::Out: Ord + Encode + 'static { // If the transaction does not exist, we generate it. if cache.transaction.is_none() { self.storage_root(backend, &mut cache); @@ -439,6 +500,7 @@ impl OverlayedChanges { .expect("Transaction was be generated as part of `storage_root`; qed"); // If the transaction does not exist, we generate it. + #[cfg(feature = "std")] if cache.changes_trie_transaction.is_none() { self.changes_trie_root( backend, @@ -449,20 +511,24 @@ impl OverlayedChanges { ).map_err(|_| "Failed to generate changes trie transaction")?; } + #[cfg(feature = "std")] let changes_trie_transaction = cache.changes_trie_transaction .take() .expect("Changes trie transaction was generated by `changes_trie_root`; qed"); - let offchain_storage_changes = Default::default(); let (main_storage_changes, child_storage_changes) = self.drain_committed(); Ok(StorageChanges { main_storage_changes: main_storage_changes.collect(), child_storage_changes: child_storage_changes.map(|(sk, it)| (sk, it.0.collect())).collect(), - offchain_storage_changes, + #[cfg(feature = "std")] + offchain_storage_changes: Default::default(), transaction, transaction_storage_root, + #[cfg(feature = "std")] changes_trie_transaction, + #[cfg(not(feature = "std"))] + _ph: Default::default(), }) } @@ -520,6 +586,7 @@ impl OverlayedChanges { /// # Panics /// /// Panics on storage error, when `panic_on_storage_error` is set. 
+ #[cfg(feature = "std")] pub fn changes_trie_root<'a, H: Hasher, N: BlockNumber, B: Backend>( &self, backend: &B, @@ -563,6 +630,29 @@ impl OverlayedChanges { } } +#[cfg(feature = "std")] +fn retain_map(map: &mut Map, f: F) + where + K: std::cmp::Eq + std::hash::Hash, + F: FnMut(&K, &mut V) -> bool, +{ + map.retain(f); +} + +#[cfg(not(feature = "std"))] +fn retain_map(map: &mut Map, mut f: F) + where + K: Ord, + F: FnMut(&K, &mut V) -> bool, +{ + let old = sp_std::mem::replace(map, Map::default()); + for (k, mut v) in old.into_iter() { + if f(&k, &mut v) { + map.insert(k, v); + } + } +} + #[cfg(test)] mod tests { use hex_literal::hex; @@ -578,7 +668,7 @@ mod tests { expected: Vec, ) { assert_eq!( - overlay.get(key.as_ref()).unwrap().extrinsics().cloned().collect::>(), + overlay.get(key.as_ref()).unwrap().extrinsics().into_iter().collect::>(), expected ) } diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index b8a35ced1eb0cbfb14add76e004a716b1ec31961..99023ec772ec351e682879ceab0e2d74b3f51e1b 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -24,7 +24,7 @@ use std::{ use crate::{Backend, StorageKey, StorageValue}; use hash_db::Hasher; use sp_core::{ - storage::ChildInfo, + storage::{ChildInfo, TrackedStorageKey}, traits::Externalities, Blake2Hasher, }; use codec::Encode; @@ -194,7 +194,11 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< unimplemented!("reset_read_write_count is not supported in ReadOnlyExternalities") } - fn set_whitelist(&mut self, _: Vec>) { + fn get_whitelist(&self) -> Vec { + unimplemented!("get_whitelist is not supported in ReadOnlyExternalities") + } + + fn set_whitelist(&mut self, _: Vec) { unimplemented!("set_whitelist is not supported in ReadOnlyExternalities") } } diff --git a/primitives/state-machine/src/stats.rs b/primitives/state-machine/src/stats.rs index a8ca5a3b416f4b3d4d0b213b9d647edba063b7d2..f84de6a5bad0786eed1085598fdd4194b7bf8a8e 100644 --- a/primitives/state-machine/src/stats.rs +++ b/primitives/state-machine/src/stats.rs @@ -17,8 +17,9 @@ //! Usage statistics for state db +#[cfg(feature = "std")] use std::time::{Instant, Duration}; -use std::cell::RefCell; +use sp_std::cell::RefCell; /// Measured count of operations and total bytes. #[derive(Clone, Debug, Default)] @@ -50,8 +51,10 @@ pub struct UsageInfo { /// Memory used. pub memory: usize, + #[cfg(feature = "std")] /// Moment at which current statistics has been started being collected. pub started: Instant, + #[cfg(feature = "std")] /// Timespan of the statistics. pub span: Duration, } @@ -99,7 +102,9 @@ impl UsageInfo { cache_reads: UsageUnit::default(), modified_reads: UsageUnit::default(), memory: 0, + #[cfg(feature = "std")] started: Instant::now(), + #[cfg(feature = "std")] span: Default::default(), } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index e0a86bbd193a1433535c42d6d3b171c803d9056d..4eaa0870baed039e96c19097e8eeddccb4bc76e8 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -17,7 +17,7 @@ //! Trie-based state machine backend. 
-use log::{warn, debug}; +use crate::{warn, debug}; use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; @@ -27,6 +27,7 @@ use crate::{ StorageKey, StorageValue, Backend, trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, }; +use sp_std::{boxed::Box, vec::Vec}; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. pub struct TrieBackend, H: Hasher> { @@ -67,8 +68,8 @@ impl, H: Hasher> TrieBackend where H::Out: Codec } } -impl, H: Hasher> std::fmt::Debug for TrieBackend { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl, H: Hasher> sp_std::fmt::Debug for TrieBackend { + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { write!(f, "TrieBackend") } } @@ -76,7 +77,7 @@ impl, H: Hasher> std::fmt::Debug for TrieBackend impl, H: Hasher> Backend for TrieBackend where H::Out: Ord + Codec, { - type Error = String; + type Error = crate::DefaultError; type Transaction = S::Overlay; type TrieBackendStorage = S; diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 72864e312b6abbe65d8ce562f666fb2ae90aed14..37bbbb7cf9822f179901f9da78aa4a617e73263c 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -18,9 +18,10 @@ //! Trie-based state machine backend essence used to read values //! from storage. -use std::ops::Deref; +#[cfg(feature = "std")] use std::sync::Arc; -use log::{debug, warn}; +use sp_std::{ops::Deref, boxed::Box, vec::Vec}; +use crate::{warn, debug}; use hash_db::{self, Hasher, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, empty_child_trie_root, read_trie_value, read_child_trie_value, @@ -30,10 +31,19 @@ use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::ChildInfo; use codec::Encode; +#[cfg(not(feature = "std"))] +macro_rules! format { + ($($arg:tt)+) => ( + crate::DefaultError + ); +} + +type Result = sp_std::result::Result; + /// Patricia trie-based storage trait. pub trait Storage: Send + Sync { /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; } /// Patricia trie-based pairs storage essence. @@ -80,12 +90,12 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. - pub fn next_storage_key(&self, key: &[u8]) -> Result, String> { + pub fn next_storage_key(&self, key: &[u8]) -> Result> { self.next_storage_key_from_root(&self.root, None, key) } /// Access the root of the child storage in its parent trie - fn child_root(&self, child_info: &ChildInfo) -> Result, String> { + fn child_root(&self, child_info: &ChildInfo) -> Result> { self.storage(child_info.prefixed_storage_key().as_slice()) } @@ -95,7 +105,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: &self, child_info: &ChildInfo, key: &[u8], - ) -> Result, String> { + ) -> Result> { let child_root = match self.child_root(child_info)? 
{ Some(child_root) => child_root, None => return Ok(None), @@ -118,7 +128,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: root: &H::Out, child_info: Option<&ChildInfo>, key: &[u8], - ) -> Result, String> { + ) -> Result> { let dyn_eph: &dyn hash_db::HashDBRef<_, _>; let keyspace_eph; if let Some(child_info) = child_info.as_ref() { @@ -158,7 +168,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Get the value of storage at given key. - pub fn storage(&self, key: &[u8]) -> Result, String> { + pub fn storage(&self, key: &[u8]) -> Result> { let map_e = |e| format!("Trie lookup error: {}", e); read_trie_value::, _>(self, &self.root, key).map_err(map_e) @@ -169,7 +179,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: &self, child_info: &ChildInfo, key: &[u8], - ) -> Result, String> { + ) -> Result> { let root = self.child_root(child_info)? .unwrap_or_else(|| empty_child_trie_root::>().encode()); @@ -234,7 +244,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: mut f: F, child_info: Option<&ChildInfo>, ) { - let mut iter = move |db| -> Result<(), Box>> { + let mut iter = move |db| -> sp_std::result::Result<(), Box>> { let trie = TrieDB::::new(db, root)?; for x in TrieDBIterator::new_prefixed(&trie, prefix)? { @@ -337,14 +347,15 @@ pub trait TrieBackendStorage: Send + Sync { /// Type of in-memory overlay. type Overlay: hash_db::HashDB + Default + Consolidate; /// Get the value stored at key. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; } // This implementation is used by normal storage trie clients. +#[cfg(feature = "std")] impl TrieBackendStorage for Arc> { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { Storage::::get(self.deref(), key, prefix) } } @@ -353,7 +364,7 @@ impl TrieBackendStorage for Arc> { impl TrieBackendStorage for PrefixedMemoryDB { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { Ok(hash_db::HashDB::get(self, key, prefix)) } } @@ -361,7 +372,7 @@ impl TrieBackendStorage for PrefixedMemoryDB { impl TrieBackendStorage for MemoryDB { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { Ok(hash_db::HashDB::get(self, key, prefix)) } } diff --git a/primitives/std/Cargo.toml b/primitives/std/Cargo.toml index a43d1997f7f7c981d1e2f7fef40cb8740651ee9b..5b988cabc150acfbf166c06c3a3de49b5b4eaff0 100644 --- a/primitives/std/Cargo.toml +++ b/primitives/std/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-std" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -9,6 +9,7 @@ repository = "https://github.com/paritytech/substrate/" description = "Lowest-abstraction level for the Substrate runtime: just exports useful primitives from std or client/alloc to be used with any code that depends on the runtime." 
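The trie-backend-essence hunk above also introduces the no_std error strategy: `Backend::Error` becomes the unit-like `DefaultError` and a local `format!` shim throws the message tokens away, so lines such as `map_err(|e| format!("Trie lookup error: {}", e))` compile with and without std. The sketch below demonstrates the idea; the shim is renamed and compiled unconditionally here so it runs as a plain binary, whereas the crate shadows `format!` itself behind `cfg(not(feature = "std"))`.

```rust
/// Zero-information error used when no `String` is available (no_std).
#[derive(Debug, PartialEq)]
pub struct DefaultError;

// Matches any format-style arguments and discards them, yielding the unit error.
macro_rules! format_or_default {
    ($($arg:tt)+) => {
        DefaultError
    };
}

fn trie_lookup() -> Result<u32, DefaultError> {
    // Pretend the lookup failed; the message tokens are matched and then thrown away.
    Err(format_or_default!("Trie lookup error: {}", "missing node"))
}

fn main() {
    assert_eq!(trie_lookup(), Err(DefaultError));
}
```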
documentation = "https://docs.rs/sp-std" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/std/README.md b/primitives/std/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6dddd8fbbdd9d2fade460195ac2d9cee0eaf35ad --- /dev/null +++ b/primitives/std/README.md @@ -0,0 +1,4 @@ +Lowest-abstraction level for the Substrate runtime: just exports useful primitives from std +or client/alloc to be used with any code that depends on the runtime. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/std/src/lib.rs b/primitives/std/src/lib.rs index 8ff1efc63d8df58c673a98f3e48851973f19a574..b323c43720da1fba661980c972c686014836f0a8 100644 --- a/primitives/std/src/lib.rs +++ b/primitives/std/src/lib.rs @@ -20,7 +20,6 @@ #![cfg_attr(not(feature = "std"), no_std)] - #![cfg_attr(feature = "std", doc = "Substrate runtime standard library as compiled when linked with Rust's standard library.")] #![cfg_attr(not(feature = "std"), @@ -65,6 +64,30 @@ include!("../with_std.rs"); #[cfg(not(feature = "std"))] include!("../without_std.rs"); + +/// A target for `core::write!` macro - constructs a string in memory. +#[derive(Default)] +pub struct Writer(vec::Vec); + +impl fmt::Write for Writer { + fn write_str(&mut self, s: &str) -> fmt::Result { + self.0.extend(s.as_bytes()); + Ok(()) + } +} + +impl Writer { + /// Access the content of this `Writer` e.g. for printout + pub fn inner(&self) -> &vec::Vec { + &self.0 + } + + /// Convert into the content of this `Writer` + pub fn into_inner(self) -> vec::Vec { + self.0 + } +} + /// Prelude of common useful imports. /// /// This should include only things which are in the normal std prelude. diff --git a/primitives/std/with_std.rs b/primitives/std/with_std.rs index e1994e764d23e340c0332e500bd933aa84f6e017..92e804b27e1d028beb44f6b6f5d83deb0973240f 100644 --- a/primitives/std/with_std.rs +++ b/primitives/std/with_std.rs @@ -44,3 +44,7 @@ pub mod collections { pub use std::collections::btree_set; pub use std::collections::vec_deque; } + +pub mod thread { + pub use std::thread::panicking; +} diff --git a/primitives/std/without_std.rs b/primitives/std/without_std.rs index 09f7a1976cc022078d0a5975ccc736cc2edb08a3..3c130d547a1e4a8972ff4b46b4d5e60d2fa07385 100755 --- a/primitives/std/without_std.rs +++ b/primitives/std/without_std.rs @@ -53,3 +53,12 @@ pub mod borrow { pub use core::borrow::*; pub use alloc::borrow::*; } + +pub mod thread { + /// Returns if the current thread is panicking. + /// + /// In wasm this always returns `false`, as we abort on any panic. 
+ pub fn panicking() -> bool { + false + } +} diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index cb7f2daa50e83b678605015ccfc9e692578c564c..4f14ba38f2147e345f0a0dfca64992e937e3cc91 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-storage" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" description = "Storage related primitives" @@ -8,17 +8,19 @@ license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sp-storage/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -impl-serde = { version = "0.2.3", optional = true } +impl-serde = { version = "0.3.1", optional = true } ref-cast = "1.0.0" -sp-debug-derive = { version = "2.0.0-rc5", path = "../debug-derive" } +sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] default = [ "std" ] -std = [ "sp-std/std", "serde", "impl-serde" ] +std = [ "sp-std/std", "serde", "impl-serde", "codec/std" ] diff --git a/primitives/storage/README.md b/primitives/storage/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c33144fc4f66202804594d6cf0cc6886b5cad339 --- /dev/null +++ b/primitives/storage/README.md @@ -0,0 +1,3 @@ +Primitive types for storage related stuff. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 073d80291c13e5f8aae9a180dbce8f419ec3a626..b253733e7b29e40f8eeb3cffc6372e7a55bdea29 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -25,6 +25,7 @@ use sp_debug_derive::RuntimeDebug; use sp_std::{vec::Vec, ops::{Deref, DerefMut}}; use ref_cast::RefCast; +use codec::{Encode, Decode}; /// Storage key. #[derive(PartialEq, Eq, RuntimeDebug)] @@ -34,6 +35,26 @@ pub struct StorageKey( pub Vec, ); +/// Storage key with read/write tracking information. +#[derive(PartialEq, Eq, RuntimeDebug, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Hash, PartialOrd, Ord))] +pub struct TrackedStorageKey { + pub key: Vec, + pub has_been_read: bool, + pub has_been_written: bool, +} + +// Easily convert a key to a `TrackedStorageKey` that has been read and written to. +impl From> for TrackedStorageKey { + fn from(key: Vec) -> Self { + Self { + key: key, + has_been_read: true, + has_been_written: true, + } + } +} + /// Storage key of a child trie, it contains the prefix to the key. 
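`TrackedStorageKey` above upgrades the benchmarking whitelist from bare keys to keys carrying read/written flags, and the `From<Vec<u8>>` impl marks a plain key as both, matching the previous behaviour. A small restatement for illustration; the whitelisted key shown is just an example of a well-known key.

```rust
/// A whitelisted storage key plus how it has been accessed.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TrackedStorageKey {
    pub key: Vec<u8>,
    pub has_been_read: bool,
    pub has_been_written: bool,
}

// Converting from a plain key assumes it has been both read and written.
impl From<Vec<u8>> for TrackedStorageKey {
    fn from(key: Vec<u8>) -> Self {
        Self { key, has_been_read: true, has_been_written: true }
    }
}

fn main() {
    // e.g. the `:extrinsic_index` well-known key.
    let whitelist: Vec<TrackedStorageKey> = vec![b":extrinsic_index".to_vec().into()];
    assert!(whitelist[0].has_been_read && whitelist[0].has_been_written);
}
```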
#[derive(PartialEq, Eq, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))] diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index 1101dd9ccc699dd13da1b422bb530dde0becb654..18468a33ae42e68e4832f5b199ced93c48a70b85 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-test-primitives" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,11 +12,11 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../application-crypto" } +sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } +sp-core = { version = "2.0.0", default-features = false, path = "../core" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } [features] diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml index 794729f775435504e9bc9a7568627c71a4ac8f72..79dae291022203393e29ebbc1468441434b2fc0e 100644 --- a/primitives/timestamp/Cargo.toml +++ b/primitives/timestamp/Cargo.toml @@ -1,22 +1,23 @@ [package] name = "sp-timestamp" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Substrate core types and inherents for timestamps." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../api" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0", default-features = false, path = "../api" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../inherents" } +sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } impl-trait-for-tuples = "0.1.3" wasm-timer = { version = "0.2", optional = true } diff --git a/primitives/timestamp/README.md b/primitives/timestamp/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a61a776912c93b4ab55525cba2d30146466ff905 --- /dev/null +++ b/primitives/timestamp/README.md @@ -0,0 +1,3 @@ +Substrate core types and inherents for timestamps. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/tracing/Cargo.toml b/primitives/tracing/Cargo.toml index 03bec79685eda6407e9b69d194b759c7bfbcf417..a1d89af58c019ef51b0a4c501dab2c9032472af2 100644 --- a/primitives/tracing/Cargo.toml +++ b/primitives/tracing/Cargo.toml @@ -1,21 +1,42 @@ [package] name = "sp-tracing" -version = "2.0.0-rc5" +version = "2.0.0" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Instrumentation primitives and macros for Substrate." +readme = "README.md" [package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] +# let's default to wasm32 +default-target = "wasm32-unknown-unknown" +# with the tracing enabled +features = ["with-tracing"] +# allowing for linux-gnu here, too, allows for `std` to show up as well +targets = ["x86_64-unknown-linux-gnu", "wasm32-unknown-unknown"] [dependencies] -tracing = { version = "0.1.18", optional = true } -rental = { version = "0.5.5", optional = true } +sp-std = { version = "2.0.0", path = "../std", default-features = false} +codec = { version = "1.3.1", package = "parity-scale-codec", default-features = false, features = ["derive"]} +tracing = { version = "0.1.19", default-features = false } +tracing-core = { version = "0.1.16", default-features = false } log = { version = "0.4.8", optional = true } +tracing-subscriber = { version = "0.2.10", optional = true, features = ["tracing-log"] } [features] default = [ "std" ] -std = [ "tracing", "rental", "log" ] +with-tracing = [ + "codec/derive", + "codec/full", +] +std = [ + "with-tracing", + "tracing/std", + "tracing-core/std", + "codec/std", + "sp-std/std", + "log", + "tracing-subscriber", +] diff --git a/primitives/tracing/README.md b/primitives/tracing/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a93c97ff62fab4b4cc61b3aaba97169c8b068bbc --- /dev/null +++ b/primitives/tracing/README.md @@ -0,0 +1,15 @@ +Substrate tracing primitives and macros. + +To trace functions or invidual code in Substrate, this crate provides [`within_span`] +and [`enter_span`]. See the individual docs for how to use these macros. + +Note that to allow traces from wasm execution environment there are +2 reserved identifiers for tracing `Field` recording, stored in the consts: +`WASM_TARGET_KEY` and `WASM_NAME_KEY` - if you choose to record fields, you +must ensure that your identifiers do not clash with either of these. + +Additionally, we have a const: `WASM_TRACE_IDENTIFIER`, which holds a span name used +to signal that the 'actual' span name and target should be retrieved instead from +the associated Fields mentioned above. + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/tracing/src/lib.rs b/primitives/tracing/src/lib.rs index e82d8861cd3f5587dc5c8b0de9808063f18410cd..fb074d5579c8277b50c310f66f5e7e240177679c 100644 --- a/primitives/tracing/src/lib.rs +++ b/primitives/tracing/src/lib.rs @@ -17,7 +17,7 @@ //! Substrate tracing primitives and macros. //! -//! To trace functions or invidual code in Substrate, this crate provides [`tracing_span`] +//! To trace functions or invidual code in Substrate, this crate provides [`within_span`] //! and [`enter_span`]. See the individual docs for how to use these macros. //! //! Note that to allow traces from wasm execution environment there are @@ -28,90 +28,209 @@ //! 
Additionally, we have a const: `WASM_TRACE_IDENTIFIER`, which holds a span name used //! to signal that the 'actual' span name and target should be retrieved instead from //! the associated Fields mentioned above. + #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(feature = "std")] -#[macro_use] -extern crate rental; +/// Tracing facilities and helpers. +/// +/// This is modeled after the `tracing`/`tracing-core` interface and uses that more or +/// less directly for the native side. Because of certain optimisations the these crates +/// have done, the wasm implementation diverges slightly and is optimised for thtat use +/// case (like being able to cross the wasm/native boundary via scale codecs). +/// +/// One of said optimisations is that all macros will yield to a `noop` in non-std unless +/// the `with-tracing` feature is explicitly activated. This allows you to just use the +/// tracing wherever you deem fit and without any performance impact by default. Only if +/// the specific `with-tracing`-feature is activated on this crate will it actually include +/// the tracing code in the non-std environment. +/// +/// Because of that optimisation, you should not use the `span!` and `span_*!` macros +/// directly as they yield nothing without the feature present. Instead you should use +/// `enter_span!` and `within_span!` – which would strip away even any parameter conversion +/// you do within the span-definition (and thus optimise your performance). For your +/// convineience you directly specify the `Level` and name of the span or use the full +/// feature set of `span!`/`span_*!` on it: +/// +/// # Example +/// +/// ```rust +/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "fn wide span"); +/// { +/// sp_tracing::enter_span!(sp_tracing::trace_span!("outer-span")); +/// { +/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "inner-span"); +/// // .. +/// } // inner span exists here +/// } // outer span exists here +/// +/// sp_tracing::within_span! { +/// sp_tracing::debug_span!("debug-span", you_can_pass="any params"); +/// 1 + 1; +/// // some other complex code +/// } // debug span ends here +/// +/// ``` +/// +/// +/// # Setup +/// +/// This project only provides the macros and facilities to manage tracing +/// it doesn't implement the tracing subscriber or backend directly – that is +/// up to the developer integrating it into a specific environment. In native +/// this can and must be done through the regular `tracing`-facitilies, please +/// see their documentation for details. +/// +/// On the wasm-side we've adopted a similar approach of having a global +/// `TracingSubscriber` that the macros call and that does the actual work +/// of tracking. To provide your tracking, you must implement `TracingSubscriber` +/// and call `set_tracing_subscriber` at the very beginning of your execution – +/// the default subscriber is doing nothing, so any spans or events happening before +/// will not be recorded! +/// -#[cfg(feature = "std")] -#[doc(hidden)] -pub use tracing; +mod types; #[cfg(feature = "std")] -pub mod proxy; +use tracing; + +pub use tracing::{ + debug, debug_span, error, error_span, info, info_span, trace, trace_span, warn, warn_span, + span, event, Level, Span, +}; +pub use crate::types::{ + WasmMetadata, WasmEntryAttributes, WasmValuesSet, WasmValue, WasmFields, WasmLevel, WasmFieldName +}; + + +/// Try to init a simple tracing subscriber with log compatibility layer. +/// Ignores any error. Useful for testing. 
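The "no-op unless the feature is on" design described in the doc comment above is the key point of the new `within_span!`/`enter_span!` macros: without `with-tracing` (or std) even the span-name and parameter expressions are compiled away, leaving only the wrapped code. The sketch below illustrates that macro pattern in isolation, with a `println!` standing in for the real span machinery and a hypothetical `trace-me` feature playing the role of `with-tracing`; it is not the real macro definition.

```rust
/// Enabled variant: wrap the body in enter/exit markers.
#[cfg(feature = "trace-me")]
macro_rules! within_span {
    ($name:expr; $($code:tt)*) => {{
        println!("enter span: {}", $name);
        let result = { $($code)* };
        println!("exit span: {}", $name);
        result
    }};
}

/// Disabled variant: the span name is matched and discarded, only the body remains.
#[cfg(not(feature = "trace-me"))]
macro_rules! within_span {
    ($name:expr; $($code:tt)*) => {{
        $($code)*
    }};
}

fn main() {
    let sum = within_span! {
        "adding";
        1 + 1
    };
    assert_eq!(sum, 2);
}
```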
#[cfg(feature = "std")] -use std::sync::atomic::{AtomicBool, Ordering}; +pub fn try_init_simple() { + let _ = tracing_subscriber::fmt().with_writer(std::io::stderr).try_init(); +} -/// Flag to signal whether to run wasm tracing #[cfg(feature = "std")] -static WASM_TRACING_ENABLED: AtomicBool = AtomicBool::new(false); +pub use crate::types::{ + WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER +}; + /// Runs given code within a tracing span, measuring it's execution time. /// -/// If tracing is not enabled, the code is still executed. +/// If tracing is not enabled, the code is still executed. Pass in level and name or +/// use any valid `sp_tracing::Span`followe by `;` and the code to execute, /// /// # Example /// /// ``` -/// sp_tracing::tracing_span! { +/// sp_tracing::within_span! { +/// sp_tracing::Level::TRACE, /// "test-span"; /// 1 + 1; /// // some other complex code /// } +/// +/// sp_tracing::within_span! { +/// sp_tracing::span!(sp_tracing::Level::WARN, "warn-span", you_can_pass="any params"); +/// 1 + 1; +/// // some other complex code +/// } +/// +/// sp_tracing::within_span! { +/// sp_tracing::debug_span!("debug-span", you_can_pass="any params"); +/// 1 + 1; +/// // some other complex code +/// } /// ``` +#[cfg(any(feature = "std", feature = "with-tracing"))] #[macro_export] -macro_rules! tracing_span { +macro_rules! within_span { ( + $span:expr; + $( $code:tt )* + ) => { + $span.in_scope(|| + { + $( $code )* + } + ) + }; + ( + $lvl:expr, $name:expr; $( $code:tt )* ) => { { - $crate::enter_span!($name); - $( $code )* + $crate::within_span!($crate::span!($crate::Level::TRACE, $name); $( $code )*) } - } + }; +} + +#[cfg(all(not(feature = "std"), not(feature = "with-tracing")))] +#[macro_export] +macro_rules! within_span { + ( + $span:stmt; + $( $code:tt )* + ) => { + $( $code )* + }; + ( + $lvl:expr, + $name:expr; + $( $code:tt )* + ) => { + $( $code )* + }; +} + + +/// Enter a span - noop for `no_std` without `with-tracing` +#[cfg(all(not(feature = "std"), not(feature = "with-tracing")))] +#[macro_export] +macro_rules! enter_span { + ( $lvl:expr, $name:expr ) => ( ); + ( $name:expr ) => ( ) // no-op } /// Enter a span. /// -/// The span will be valid, until the scope is left. +/// The span will be valid, until the scope is left. Use either level and name +/// or pass in any valid `sp_tracing::Span` for extended usage. The span will +/// be exited on drop – which is at the end of the block or to the next +/// `enter_span!` calls, as this overwrites the local variable. For nested +/// usage or to ensure the span closes at certain time either put it into a block +/// or use `within_span!` /// /// # Example /// /// ``` -/// sp_tracing::enter_span!("test-span"); +/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "test-span"); +/// // previous will be dropped here +/// sp_tracing::enter_span!( +/// sp_tracing::span!(sp_tracing::Level::DEBUG, "debug-span", params="value")); +/// sp_tracing::enter_span!(sp_tracing::info_span!("info-span", params="value")); +/// +/// { +/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "outer-span"); +/// { +/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "inner-span"); +/// // .. +/// } // inner span exists here +/// } // outer span exists here +/// /// ``` +#[cfg(any(feature = "std", feature = "with-tracing"))] #[macro_export] macro_rules! 
enter_span { - ( $name:expr ) => { - let __tracing_span__ = $crate::if_tracing!( - $crate::tracing::span!($crate::tracing::Level::TRACE, $name) - ); - let __tracing_guard__ = $crate::if_tracing!(__tracing_span__.enter()); - } -} - -/// Generates the given code if the tracing dependency is enabled. -#[macro_export] -#[cfg(feature = "std")] -macro_rules! if_tracing { - ( $if:expr ) => {{ $if }} + ( $span:expr ) => { + // Calling this twice in a row will overwrite (and drop) the earlier + // that is a _documented feature_! + let __within_span__ = $span; + let __tracing_guard__ = __within_span__.enter(); + }; + ( $lvl:expr, $name:expr ) => { + $crate::enter_span!($crate::span!($crate::Level::TRACE, $name)) + }; } - -#[macro_export] -#[cfg(not(feature = "std"))] -macro_rules! if_tracing { - ( $if:expr ) => {{}} -} - -#[cfg(feature = "std")] -pub fn wasm_tracing_enabled() -> bool { - WASM_TRACING_ENABLED.load(Ordering::Relaxed) -} - -#[cfg(feature = "std")] -pub fn set_wasm_tracing(b: bool) { - WASM_TRACING_ENABLED.store(b, Ordering::Relaxed) -} \ No newline at end of file diff --git a/primitives/tracing/src/proxy.rs b/primitives/tracing/src/proxy.rs deleted file mode 100644 index 270f57aaa69f05f857b61300b36a4524276fe76a..0000000000000000000000000000000000000000 --- a/primitives/tracing/src/proxy.rs +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Proxy to allow entering tracing spans from wasm. -//! -//! Use `enter_span` and `exit_span` to surround the code that you wish to trace -use rental; -use tracing::info_span; - -/// Used to identify a proxied WASM trace -pub const WASM_TRACE_IDENTIFIER: &'static str = "WASM_TRACE"; -/// Used to extract the real `target` from the associated values of the span -pub const WASM_TARGET_KEY: &'static str = "proxied_wasm_target"; -/// Used to extract the real `name` from the associated values of the span -pub const WASM_NAME_KEY: &'static str = "proxied_wasm_name"; - -const MAX_SPANS_LEN: usize = 1000; - -rental! { - pub mod rent_span { - #[rental] - pub struct SpanAndGuard { - span: Box, - guard: tracing::span::Entered<'span>, - } - } -} - -/// Requires a tracing::Subscriber to process span traces, -/// this is available when running with client (and relevant cli params). 
-pub struct TracingProxy { - next_id: u64, - spans: Vec<(u64, rent_span::SpanAndGuard)>, -} - -impl Drop for TracingProxy { - fn drop(&mut self) { - if !self.spans.is_empty() { - log::debug!( - target: "tracing", - "Dropping TracingProxy with {} un-exited spans, marking as not valid", self.spans.len() - ); - while let Some((_, mut sg)) = self.spans.pop() { - sg.rent_all_mut(|s| { s.span.record("is_valid_trace", &false); }); - } - } - } -} - -impl TracingProxy { - pub fn new() -> TracingProxy { - TracingProxy { - next_id: 0, - spans: Vec::new(), - } - } -} - -impl TracingProxy { - /// Create and enter a `tracing` Span, returning the span id, - /// which should be passed to `exit_span(id)` to signal that the span should exit. - pub fn enter_span(&mut self, proxied_wasm_target: &str, proxied_wasm_name: &str) -> u64 { - // The identifiers `proxied_wasm_target` and `proxied_wasm_name` must match their associated const, - // WASM_TARGET_KEY and WASM_NAME_KEY. - let span = info_span!(WASM_TRACE_IDENTIFIER, is_valid_trace = true, proxied_wasm_target, proxied_wasm_name); - self.next_id += 1; - let sg = rent_span::SpanAndGuard::new( - Box::new(span), - |span| span.enter(), - ); - self.spans.push((self.next_id, sg)); - if self.spans.len() > MAX_SPANS_LEN { - // This is to prevent unbounded growth of Vec and could mean one of the following: - // 1. Too many nested spans, or MAX_SPANS_LEN is too low. - // 2. Not correctly exiting spans due to misconfiguration / misuse - log::warn!( - target: "tracing", - "TracingProxy MAX_SPANS_LEN exceeded, removing oldest span." - ); - let mut sg = self.spans.remove(0).1; - sg.rent_all_mut(|s| { s.span.record("is_valid_trace", &false); }); - } - self.next_id - } - - /// Exit a span by dropping it along with it's associated guard. - pub fn exit_span(&mut self, id: u64) { - if self.spans.last().map(|l| id > l.0).unwrap_or(true) { - log::warn!(target: "tracing", "Span id not found in TracingProxy: {}", id); - return; - } - let mut last_span = self.spans.pop().expect("Just checked that there is an element to pop; qed"); - while id < last_span.0 { - log::warn!( - target: "tracing", - "TracingProxy Span ids not equal! 
id parameter given: {}, last span: {}", - id, - last_span.0, - ); - last_span.1.rent_all_mut(|s| { s.span.record("is_valid_trace", &false); }); - if let Some(s) = self.spans.pop() { - last_span = s; - } else { - log::warn!(target: "tracing", "Span id not found in TracingProxy {}", id); - return; - } - } - } -} - - -#[cfg(test)] -mod tests { - use super::*; - - fn create_spans(proxy: &mut TracingProxy, qty: usize) -> Vec { - let mut spans = Vec::new(); - for n in 0..qty { - spans.push(proxy.enter_span("target", &format!("{}", n))); - } - spans - } - - #[test] - fn max_spans_len_respected() { - let mut proxy = TracingProxy::new(); - let _spans = create_spans(&mut proxy, MAX_SPANS_LEN + 10); - assert_eq!(proxy.spans.len(), MAX_SPANS_LEN); - // ensure oldest spans removed - assert_eq!(proxy.spans[0].0, 11); - } - - #[test] - fn handles_span_exit_scenarios() { - let mut proxy = TracingProxy::new(); - let _spans = create_spans(&mut proxy, 10); - assert_eq!(proxy.spans.len(), 10); - // exit span normally - proxy.exit_span(10); - assert_eq!(proxy.spans.len(), 9); - // skip and exit outer span without exiting inner, id: 8 instead of 9 - proxy.exit_span(8); - // should have also removed the inner span that was lost - assert_eq!(proxy.spans.len(), 7); - // try to exit span not held - proxy.exit_span(9); - assert_eq!(proxy.spans.len(), 7); - // exit all spans - proxy.exit_span(1); - assert_eq!(proxy.spans.len(), 0); - } -} diff --git a/primitives/tracing/src/types.rs b/primitives/tracing/src/types.rs new file mode 100644 index 0000000000000000000000000000000000000000..050ac4c31416697c42bea21f0823c231287417a8 --- /dev/null +++ b/primitives/tracing/src/types.rs @@ -0,0 +1,623 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Types for wasm based tracing. Loosly inspired by `tracing-core` but +/// optimised for the specific use case. 
+ +use core::{format_args, fmt::Debug}; +use sp_std::{ + vec, vec::Vec, +}; +use sp_std::Writer; +use codec::{Encode, Decode}; + +/// The Tracing Level – the user can filter by this +#[derive(Clone, Encode, Decode, Debug)] +pub enum WasmLevel { + /// This is a fatal errors + ERROR, + /// This is a warning you should be aware of + WARN, + /// Nice to now info + INFO, + /// Further information for debugging purposes + DEBUG, + /// The lowest level, keeping track of minute detail + TRACE +} + + +impl From<&tracing_core::Level> for WasmLevel { + fn from(l: &tracing_core::Level) -> WasmLevel { + match l { + &tracing_core::Level::ERROR => WasmLevel::ERROR, + &tracing_core::Level::WARN => WasmLevel::WARN, + &tracing_core::Level::INFO => WasmLevel::INFO, + &tracing_core::Level::DEBUG => WasmLevel::DEBUG, + &tracing_core::Level::TRACE => WasmLevel::TRACE, + } + } +} + + + +impl core::default::Default for WasmLevel { + fn default() -> Self { + WasmLevel::TRACE + } +} + +/// A paramter value provided to the span/event +#[derive(Encode, Decode, Clone)] +pub enum WasmValue { + U8(u8), + I8(i8), + U32(u32), + I32(i32), + I64(i64), + U64(u64), + Bool(bool), + Str(Vec), + /// Debug or Display call, this is most-likely a print-able UTF8 String + Formatted(Vec), + /// SCALE CODEC encoded object – the name should allow the received to know + /// how to decode this. + Encoded(Vec), +} + +impl core::fmt::Debug for WasmValue { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + match self { + WasmValue::U8(ref i) => { + f.write_fmt(format_args!("{}_u8", i)) + } + WasmValue::I8(ref i) => { + f.write_fmt(format_args!("{}_i8", i)) + } + WasmValue::U32(ref i) => { + f.write_fmt(format_args!("{}_u32", i)) + } + WasmValue::I32(ref i) => { + f.write_fmt(format_args!("{}_i32", i)) + } + WasmValue::I64(ref i) => { + f.write_fmt(format_args!("{}_i64", i)) + } + WasmValue::U64(ref i) => { + f.write_fmt(format_args!("{}_u64", i)) + } + WasmValue::Bool(ref i) => { + f.write_fmt(format_args!("{}_bool", i)) + } + WasmValue::Formatted(ref i) | WasmValue::Str(ref i) => { + if let Ok(v) = core::str::from_utf8(i) { + f.write_fmt(format_args!("{}", v)) + } else { + f.write_fmt(format_args!("{:?}", i)) + } + } + WasmValue::Encoded(ref v) => { + f.write_str("Scale(")?; + for byte in v { + f.write_fmt(format_args!("{:02x}", byte))?; + } + f.write_str(")") + } + } + } +} + +impl From for WasmValue { + fn from(u: u8) -> WasmValue { + WasmValue::U8(u) + } +} + +impl From<&i8> for WasmValue { + fn from(inp: &i8) -> WasmValue { + WasmValue::I8(inp.clone()) + } +} + +impl From<&str> for WasmValue { + fn from(inp: &str) -> WasmValue { + WasmValue::Str(inp.as_bytes().to_vec()) + } +} + +impl From<&&str> for WasmValue { + fn from(inp: &&str) -> WasmValue { + WasmValue::Str((*inp).as_bytes().to_vec()) + } +} + +impl From for WasmValue { + fn from(inp: bool) -> WasmValue { + WasmValue::Bool(inp) + } +} + +impl From> for WasmValue { + fn from(inp: core::fmt::Arguments<'_>) -> WasmValue { + let mut buf = Writer::default(); + core::fmt::write(&mut buf, inp).expect("Writing of arguments doesn't fail"); + WasmValue::Formatted(buf.into_inner()) + } +} + +impl From for WasmValue { + fn from(u: i8) -> WasmValue { + WasmValue::I8(u) + } +} + +impl From for WasmValue { + fn from(u: i32) -> WasmValue { + WasmValue::I32(u) + } +} + +impl From<&i32> for WasmValue { + fn from(u: &i32) -> WasmValue { + WasmValue::I32(*u) + } +} + +impl From for WasmValue { + fn from(u: u32) -> WasmValue { + WasmValue::U32(u) + } +} + +impl From<&u32> for 
WasmValue { + fn from(u: &u32) -> WasmValue { + WasmValue::U32(*u) + } +} + +impl From<u64> for WasmValue { + fn from(u: u64) -> WasmValue { + WasmValue::U64(u) + } +} + +impl From<i64> for WasmValue { + fn from(u: i64) -> WasmValue { + WasmValue::I64(u) + } +} + +/// The name of a field provided as the argument name when constructing an +/// `event!` or `span!`. +/// Generally generated automatically via `stringify` from a `&'static str`. +/// Likely print-able. +#[derive(Encode, Decode, Clone)] +pub struct WasmFieldName(Vec<u8>); + +impl core::fmt::Debug for WasmFieldName { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + if let Ok(v) = core::str::from_utf8(&self.0) { + f.write_fmt(format_args!("{}", v)) + } else { + for byte in self.0.iter() { + f.write_fmt(format_args!("{:02x}", byte))?; + } + Ok(()) + } + } +} + +impl From<Vec<u8>> for WasmFieldName { + fn from(v: Vec<u8>) -> Self { + WasmFieldName(v) + } +} + +impl From<&str> for WasmFieldName { + fn from(v: &str) -> Self { + WasmFieldName(v.as_bytes().to_vec()) + } +} + +/// A list of `WasmFieldName`s in the order provided +#[derive(Encode, Decode, Clone, Debug)] +pub struct WasmFields(Vec<WasmFieldName>); + +impl WasmFields { + /// Iterate over the fields + pub fn iter(&self) -> core::slice::Iter<'_, WasmFieldName> { + self.0.iter() + } +} + +impl From<Vec<WasmFieldName>> for WasmFields { + fn from(v: Vec<WasmFieldName>) -> WasmFields { + WasmFields(v.into()) + } +} + +impl From<Vec<&str>> for WasmFields { + fn from(v: Vec<&str>) -> WasmFields { + WasmFields(v.into_iter().map(|v| v.into()).collect()) + } +} + +impl WasmFields { + /// Create an empty entry + pub fn empty() -> Self { + WasmFields(Vec::with_capacity(0)) + } +} + +impl From<&tracing_core::field::FieldSet> for WasmFields { + fn from(wm: &tracing_core::field::FieldSet) -> WasmFields { + WasmFields(wm.iter().map(|s| s.name().into()).collect()) + } +} + +/// A list of `WasmFieldName`s with the given `WasmValue` (if provided) +/// in the order specified.
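Before the value-set container defined next, a brief illustration of what the conversions and the `Debug` implementation above produce for individual values (sketch only; std build and crate-root re-exports assumed, the literal values are arbitrary):

// Sketch exercising the `From` impls and the `Debug` formatting defined above.
use sp_tracing::WasmValue;

fn debug_rendering() {
	// Numeric and bool values carry a type suffix so the origin type survives printing.
	assert_eq!(format!("{:?}", WasmValue::from(42u32)), "42_u32");
	assert_eq!(format!("{:?}", WasmValue::from(true)), "true_bool");
	// `&str` and formatted arguments are stored as UTF-8 bytes and printed back as text.
	assert_eq!(format!("{:?}", WasmValue::from("hello")), "hello");
	assert_eq!(format!("{:?}", WasmValue::from(format_args!("{} blocks", 3))), "3 blocks");
	// SCALE-encoded payloads are rendered as hex inside `Scale(..)`.
	assert_eq!(format!("{:?}", WasmValue::Encoded(vec![0x2a, 0xff])), "Scale(2aff)");
}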
+#[derive(Encode, Decode, Clone)] +pub struct WasmValuesSet(Vec<(WasmFieldName, Option<WasmValue>)>); + +impl core::fmt::Debug for WasmValuesSet { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + let mut wrt = f.debug_struct(""); + let mut non_str = false; + for (f, v) in self.0.iter() { + if let Ok(s) = core::str::from_utf8(&f.0) { + match v { + Some(ref i) => wrt.field(s, i), + None => wrt.field(s, &(None as Option<WasmValue>)), + }; + } else { + non_str = true; + } + } + + // FIXME: replace with using `finish_non_exhaustive()` once stable + // https://github.com/rust-lang/rust/issues/67364 + if non_str { + wrt.field("..", &".."); + } + + wrt.finish() + } +} + + +impl From<Vec<(WasmFieldName, Option<WasmValue>)>> for WasmValuesSet { + fn from(v: Vec<(WasmFieldName, Option<WasmValue>)>) -> Self { + WasmValuesSet(v) + } +} +impl From<Vec<(&&WasmFieldName, Option<WasmValue>)>> for WasmValuesSet { + fn from(v: Vec<(&&WasmFieldName, Option<WasmValue>)>) -> Self { + WasmValuesSet(v.into_iter().map(|(k, v)| ((**k).clone(), v)).collect()) + } +} + +impl From<Vec<(&&str, Option<WasmValue>)>> for WasmValuesSet { + fn from(v: Vec<(&&str, Option<WasmValue>)>) -> Self { + WasmValuesSet(v.into_iter().map(|(k, v)| ((*k).into(), v)).collect()) + } +} + +impl WasmValuesSet { + /// Create an empty entry + pub fn empty() -> Self { + WasmValuesSet(Vec::with_capacity(0)) + } +} + +impl tracing_core::field::Visit for WasmValuesSet { + fn record_debug(&mut self, field: &tracing_core::field::Field, value: &dyn Debug) { + self.0.push( ( + field.name().into(), + Some(WasmValue::from(format_args!("{:?}", value))) + )) + } + fn record_i64(&mut self, field: &tracing_core::field::Field, value: i64) { + self.0.push( ( + field.name().into(), + Some(WasmValue::from(value)) + )) + } + fn record_u64(&mut self, field: &tracing_core::field::Field, value: u64) { + self.0.push( ( + field.name().into(), + Some(WasmValue::from(value)) + )) + } + fn record_bool(&mut self, field: &tracing_core::field::Field, value: bool) { + self.0.push( ( + field.name().into(), + Some(WasmValue::from(value)) + )) + } + fn record_str(&mut self, field: &tracing_core::field::Field, value: &str) { + self.0.push( ( + field.name().into(), + Some(WasmValue::from(value)) + )) + } +} +/// Metadata provides generic information about the specific location of the +/// `span!` or `event!` call on the wasm-side.
+#[derive(Encode, Decode, Clone)] +pub struct WasmMetadata { + /// The name given to `event!`/`span!`, `&'static str` converted to bytes + pub name: Vec, + /// The given target to `event!`/`span!` – or module-name, `&'static str` converted to bytes + pub target: Vec, + /// The level of this entry + pub level: WasmLevel, + /// The file this was emitted from – useful for debugging; `&'static str` converted to bytes + pub file: Vec, + /// The specific line number in the file – useful for debugging + pub line: u32, + /// The module path; `&'static str` converted to bytes + pub module_path: Vec, + /// Whether this is a call to `span!` or `event!` + pub is_span: bool, + /// The list of fields specified in the call + pub fields: WasmFields, +} + +impl From<&tracing_core::Metadata<'_>> for WasmMetadata { + fn from(wm: &tracing_core::Metadata<'_>) -> WasmMetadata { + WasmMetadata { + name: wm.name().as_bytes().to_vec(), + target: wm.target().as_bytes().to_vec(), + level: wm.level().into(), + file: wm.file().map(|f| f.as_bytes().to_vec()).unwrap_or_default(), + line: wm.line().unwrap_or_default(), + module_path: wm.module_path().map(|m| m.as_bytes().to_vec()).unwrap_or_default(), + is_span: wm.is_span(), + fields: wm.fields().into() + } + } +} + +impl core::fmt::Debug for WasmMetadata { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("WasmMetadata") + .field("name", &decode_field(&self.name)) + .field("target", &decode_field(&self.target)) + .field("level", &self.level) + .field("file", &decode_field(&self.file)) + .field("line", &self.line) + .field("module_path", &decode_field(&self.module_path)) + .field("is_span", &self.is_span) + .field("fields", &self.fields) + .finish() + } +} + +impl core::default::Default for WasmMetadata { + fn default() -> Self { + let target = "default".as_bytes().to_vec(); + WasmMetadata { + target, + name: Default::default(), + level: Default::default(), + file: Default::default(), + line: Default::default(), + module_path: Default::default(), + is_span: true, + fields: WasmFields::empty() + } + } +} + + +fn decode_field(field: &[u8]) -> &str { + core::str::from_utf8(field).unwrap_or_default() +} + +/// Span or Event Attributes +#[derive(Encode, Decode, Clone, Debug)] +pub struct WasmEntryAttributes { + /// the parent, if directly specified – otherwise assume most inner span + pub parent_id: Option, + /// the metadata of the location + pub metadata: WasmMetadata, + /// the Values provided + pub fields: WasmValuesSet, +} + +impl From<&tracing_core::Event<'_>> for WasmEntryAttributes { + fn from(evt: &tracing_core::Event<'_>) -> WasmEntryAttributes { + let mut fields = WasmValuesSet(Vec::new()); + evt.record(&mut fields); + WasmEntryAttributes { + parent_id: evt.parent().map(|id| id.into_u64()), + metadata: evt.metadata().into(), + fields: fields + } + } +} + +impl From<&tracing_core::span::Attributes<'_>> for WasmEntryAttributes { + fn from(attrs: &tracing_core::span::Attributes<'_>) -> WasmEntryAttributes { + let mut fields = WasmValuesSet(Vec::new()); + attrs.record(&mut fields); + WasmEntryAttributes { + parent_id: attrs.parent().map(|id| id.into_u64()), + metadata: attrs.metadata().into(), + fields: fields + } + } +} + +impl core::default::Default for WasmEntryAttributes { + fn default() -> Self { + WasmEntryAttributes { + parent_id: None, + metadata: Default::default(), + fields: WasmValuesSet(vec![]), + } + } +} + +#[cfg(feature = "std")] +mod std_features { + + use tracing_core::callsite; + use tracing; + + /// Static 
entry use for wasm-originated metadata. + pub struct WasmCallsite; + impl callsite::Callsite for WasmCallsite { + fn set_interest(&self, _: tracing_core::Interest) { unimplemented!() } + fn metadata(&self) -> &tracing_core::Metadata { unimplemented!() } + } + static CALLSITE: WasmCallsite = WasmCallsite; + /// The identifier we are using to inject the wasm events in the generic `tracing` system + pub static WASM_TRACE_IDENTIFIER: &'static str = "wasm_tracing"; + /// The fieldname for the wasm-originated name + pub static WASM_NAME_KEY: &'static str = "name"; + /// The fieldname for the wasm-originated target + pub static WASM_TARGET_KEY: &'static str = "target"; + /// The the list of all static field names we construct from the given metadata + pub static GENERIC_FIELDS: &'static [&'static str] = &[WASM_TARGET_KEY, WASM_NAME_KEY, + "file", "line", "module_path", "params"]; + + // Implementation Note: + // the original `tracing` crate generates these static metadata entries at every `span!` and + // `event!` location to allow for highly optimised filtering. For us to allow level-based emitting + // of wasm events we need these static metadata entries to inject into that system. We then provide + // generic `From`-implementations picking the right metadata to refer to. + + static SPAN_ERROR_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::ERROR, None, None, None, + tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), + tracing_core::metadata::Kind::SPAN + ); + + static SPAN_WARN_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::WARN, None, None, None, + tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), + tracing_core::metadata::Kind::SPAN + ); + static SPAN_INFO_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::INFO, None, None, None, + tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), + tracing_core::metadata::Kind::SPAN + ); + + static SPAN_DEBUG_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::DEBUG, None, None, None, + tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), + tracing_core::metadata::Kind::SPAN + ); + + static SPAN_TRACE_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::TRACE, None, None, None, + tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), + tracing_core::metadata::Kind::SPAN + ); + + static EVENT_ERROR_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::ERROR, None, None, None, + tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), + tracing_core::metadata::Kind::EVENT + ); + + static EVENT_WARN_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::WARN, None, None, None, + tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), + tracing_core::metadata::Kind::EVENT + ); + + static 
EVENT_INFO_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::INFO, None, None, None, + tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), + tracing_core::metadata::Kind::EVENT + ); + + static EVENT_DEBUG_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::DEBUG, None, None, None, + tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), + tracing_core::metadata::Kind::EVENT + ); + + static EVENT_TRACE_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::TRACE, None, None, None, + tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), + tracing_core::metadata::Kind::EVENT + ); + + // FIXME: this could be done a lot in 0.2 if they opt for using `Cow` instead + // https://github.com/paritytech/substrate/issues/7134 + impl From<&crate::WasmMetadata> for &'static tracing_core::Metadata<'static> { + fn from(wm: &crate::WasmMetadata) -> &'static tracing_core::Metadata<'static> { + match (&wm.level, wm.is_span) { + (&crate::WasmLevel::ERROR, true) => &SPAN_ERROR_METADATA, + (&crate::WasmLevel::WARN, true) => &SPAN_WARN_METADATA, + (&crate::WasmLevel::INFO, true) => &SPAN_INFO_METADATA, + (&crate::WasmLevel::DEBUG, true) => &SPAN_DEBUG_METADATA, + (&crate::WasmLevel::TRACE, true) => &SPAN_TRACE_METADATA, + (&crate::WasmLevel::ERROR, false) => &EVENT_ERROR_METADATA, + (&crate::WasmLevel::WARN, false) => &EVENT_WARN_METADATA, + (&crate::WasmLevel::INFO, false) => &EVENT_INFO_METADATA, + (&crate::WasmLevel::DEBUG, false) => &EVENT_DEBUG_METADATA, + (&crate::WasmLevel::TRACE, false) => &EVENT_TRACE_METADATA, + } + } + } + + impl From for tracing::Span { + fn from(a: crate::WasmEntryAttributes) -> tracing::Span { + let name = std::str::from_utf8(&a.metadata.name).unwrap_or_default(); + let target = std::str::from_utf8(&a.metadata.target).unwrap_or_default(); + let file = std::str::from_utf8(&a.metadata.file).unwrap_or_default(); + let line = a.metadata.line; + let module_path = std::str::from_utf8(&a.metadata.module_path).unwrap_or_default(); + let params = a.fields; + let metadata : &tracing_core::metadata::Metadata<'static> = (&a.metadata).into(); + + tracing::span::Span::child_of( + a.parent_id.map(|i|tracing_core::span::Id::from_u64(i)), + &metadata, + &tracing::valueset!{ metadata.fields(), target, name, file, line, module_path, ?params } + ) + } + } + + impl crate::WasmEntryAttributes { + /// convert the given Attributes to an event and emit it using `tracing_core`. 
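Before the `emit` helper that follows, the span-side counterpart: the `From<WasmEntryAttributes> for tracing::Span` conversion defined above lets the host enter a native span on behalf of wasm code. A hedged sketch (std build, with `tracing` and `parity-scale-codec` as dependencies and crate-root re-exports assumed):

// Sketch: turn a SCALE-encoded wasm span into a native `tracing` span and enter it.
use codec::Decode;
use sp_tracing::WasmEntryAttributes;

fn enter_wasm_span(encoded: &[u8]) {
	let attrs = WasmEntryAttributes::decode(&mut &encoded[..]).expect("valid encoding");
	let span: tracing::Span = attrs.into();
	let _guard = span.enter();
	// Host-side work performed here is attributed to the wasm-originated span;
	// the span is exited when `_guard` is dropped at the end of this scope.
}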
+ pub fn emit(self: crate::WasmEntryAttributes) { + let name = std::str::from_utf8(&self.metadata.name).unwrap_or_default(); + let target = std::str::from_utf8(&self.metadata.target).unwrap_or_default(); + let file = std::str::from_utf8(&self.metadata.file).unwrap_or_default(); + let line = self.metadata.line; + let module_path = std::str::from_utf8(&self.metadata.module_path).unwrap_or_default(); + let params = self.fields; + let metadata : &tracing_core::metadata::Metadata<'static> = (&self.metadata).into(); + + tracing_core::Event::child_of( + self.parent_id.map(|i|tracing_core::span::Id::from_u64(i)), + &metadata, + &tracing::valueset!{ metadata.fields(), target, name, file, line, module_path, ?params } + ) + } + } +} + +#[cfg(feature = "std")] +pub use std_features::*; diff --git a/primitives/transaction-pool/Cargo.toml b/primitives/transaction-pool/Cargo.toml index 344f491e55f9fd715bad83a2a28b9c73b01ea9f8..57ba3a28ac3c1b1f520925cba3c0940fa2e342cd 100644 --- a/primitives/transaction-pool/Cargo.toml +++ b/primitives/transaction-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-transaction-pool" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Transaction pool primitives types & Runtime API." documentation = "https://docs.rs/sp-transaction-pool" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,9 +19,9 @@ derive_more = { version = "0.99.2", optional = true } futures = { version = "0.3.1", optional = true } log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", features = ["derive"], optional = true} -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../api" } -sp-blockchain = { version = "2.0.0-rc5", optional = true, path = "../blockchain" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0", default-features = false, path = "../api" } +sp-blockchain = { version = "2.0.0", optional = true, path = "../blockchain" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } [features] default = [ "std" ] diff --git a/primitives/transaction-pool/README.md b/primitives/transaction-pool/README.md new file mode 100644 index 0000000000000000000000000000000000000000..417565ebfce00c6a9961efa5def9bf327c37dd7f --- /dev/null +++ b/primitives/transaction-pool/README.md @@ -0,0 +1,3 @@ +Transaction pool primitives types & Runtime API. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 8dd386e095109e535c3964439c1e158c45c3fbef..7b7629bbf9bb2df9e38a9701418aaa0af5678011 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-trie" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] description = "Patricia trie stuff using a parity-scale-codec node format" repository = "https://github.com/paritytech/substrate/" @@ -8,6 +8,7 @@ license = "Apache-2.0" edition = "2018" homepage = "https://substrate.dev" documentation = "https://docs.rs/sp-trie" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,19 +19,19 @@ harness = false [dependencies] codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } hash-db = { version = "0.15.2", default-features = false } trie-db = { version = "0.22.0", default-features = false } trie-root = { version = "0.16.0", default-features = false } memory-db = { version = "0.24.0", default-features = false } -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../core" } +sp-core = { version = "2.0.0", default-features = false, path = "../core" } [dev-dependencies] -trie-bench = "0.24.0" +trie-bench = "0.25.0" trie-standardmap = "0.15.2" -criterion = "0.2.11" -hex-literal = "0.2.1" -sp-runtime = { version = "2.0.0-rc5", path = "../runtime" } +criterion = "0.3.3" +hex-literal = "0.3.1" +sp-runtime = { version = "2.0.0", path = "../runtime" } [features] default = ["std"] diff --git a/primitives/trie/README.md b/primitives/trie/README.md new file mode 100644 index 0000000000000000000000000000000000000000..634ba4bdead26d4a77e4c284866bf0cb37b3d93c --- /dev/null +++ b/primitives/trie/README.md @@ -0,0 +1,3 @@ +Utility functions to interact with Substrate's Base-16 Modified Merkle Patricia tree ("trie"). 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/utils/Cargo.toml b/primitives/utils/Cargo.toml index a554a44ce44a6ff2e7a309d724a386e640fd5ac8..80329d2e59ea976dbb0aa2d8f51bd998aa8899f7 100644 --- a/primitives/utils/Cargo.toml +++ b/primitives/utils/Cargo.toml @@ -1,18 +1,19 @@ [package] name = "sp-utils" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "I/O for Substrate runtimes" +readme = "README.md" [dependencies] futures = "0.3.4" futures-core = "0.3.4" lazy_static = "1.4.0" -prometheus = { version = "0.9.0", default-features = false } +prometheus = { version = "0.10.0", default-features = false } futures-timer = "3.0.2" [features] diff --git a/primitives/utils/README.md b/primitives/utils/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b0e04a3f4f1988e1a8ef88a9686e32d20bbcc4f8 --- /dev/null +++ b/primitives/utils/README.md @@ -0,0 +1,3 @@ +Utilities Primitives for Substrate + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index fb5bcaed77cbd474de56e9b1f77da8f428ea29b1..e9475846246ee951e055664e8db4f1067c7b482d 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-version" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,17 +8,18 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Version module for the Substrate runtime; Provides a function that returns the runtime version." documentation = "https://docs.rs/sp-version" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -impl-serde = { version = "0.2.3", optional = true } +impl-serde = { version = "0.3.1", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } [features] default = ["std"] diff --git a/primitives/version/README.md b/primitives/version/README.md new file mode 100644 index 0000000000000000000000000000000000000000..84f0ae57d9dbeef1c84e4e01a8769b5f886e4501 --- /dev/null +++ b/primitives/version/README.md @@ -0,0 +1,3 @@ +Version module for the Substrate runtime; Provides a function that returns the runtime version. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index 00fbaf5f7137272183edc34cb93b523e2b87fab6..a85b6cd1d118a0f00a4d65f60653a1d285e53249 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-wasm-interface" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Types and traits for interfacing between the host and the wasm runtime." documentation = "https://docs.rs/sp-wasm-interface" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] wasmi = { version = "0.6.2", optional = true } impl-trait-for-tuples = "0.1.2" -sp-std = { version = "2.0.0-rc5", path = "../std", default-features = false } +sp-std = { version = "2.0.0", path = "../std", default-features = false } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } [features] diff --git a/primitives/wasm-interface/README.md b/primitives/wasm-interface/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7e6c46581ae43c19b3b9b73d3d2b7975017c1a38 --- /dev/null +++ b/primitives/wasm-interface/README.md @@ -0,0 +1,3 @@ +Types and traits for interfacing between the host and the wasm runtime. + +License: Apache-2.0 \ No newline at end of file diff --git a/ss58-registry.json b/ss58-registry.json new file mode 100644 index 0000000000000000000000000000000000000000..f515d6ec23a22d8e978e5535cb57366fd5f76752 --- /dev/null +++ b/ss58-registry.json @@ -0,0 +1,320 @@ +{ + "specification": "https://github.com/paritytech/substrate/wiki/External-Address-Format-(SS58)", + "schema": { + "prefix": "The address prefix. Must be an integer and unique.", + "network": "Unique identifier for the network that will use this prefix, string, no spaces. To integrate with CLI tools, e.g. `--network polkadot`.", + "displayName": "The name of the network that will use this prefix, in a format friendly for display.", + "symbols": "Array of symbols of any tokens the chain uses, usually 2-5 characters. Most chains will only have one. Chains that have multiple instances of the Balances pallet should order the array by instance.", + "decimals": "Array of integers representing the number of decimals that represent a single unit to the end user. Must be same length as `symbols` to represent each token's denomination.", + "standardAccount": "Signing curve for standard account. Substrate supports ed25519, sr25519, and secp256k1.", + "website": "A website or Github repo associated with the network." 
+ }, + "registry": [ + { + "prefix": 0, + "network": "polkadot", + "displayName": "Polkadot Relay Chain", + "symbols": ["DOT"], + "decimals": [10], + "standardAccount": "*25519", + "website": "https://polkadot.network" + }, + { + "prefix": 1, + "network": "reserved1", + "displayName": "This prefix is reserved.", + "symbols": null, + "decimals": null, + "standardAccount": null, + "website": null + }, + { + "prefix": 2, + "network": "kusama", + "displayName": "Kusama Relay Chain", + "symbols": ["KSM"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://kusama.network" + }, + { + "prefix": 3, + "network": "reserved3", + "displayName": "This prefix is reserved.", + "symbols": null, + "decimals": null, + "standardAccount": null, + "website": null + }, + { + "prefix": 4, + "network": "katalchain", + "displayName": "Katal Chain", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": null + }, + { + "prefix": 5, + "network": "plasm", + "displayName": "Plasm Network", + "symbols": ["PLM"], + "decimals": null, + "standardAccount": "*25519", + "website": null + }, + { + "prefix": 6, + "network": "bitfrost", + "displayName": "Bitfrost", + "symbols": ["BNC"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://bifrost.finance/" + }, + { + "prefix": 7, + "network": "edgeware", + "displayName": "Edgeware", + "symbols": ["EDG"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://edgewa.re" + }, + { + "prefix": 8, + "network": "karura", + "displayName": "Acala Karura Canary", + "symbols": ["KAR"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://acala.network/" + }, + { + "prefix": 9, + "network": "reynolds", + "displayName": "Laminar Reynolds Canary", + "symbols": ["REY"], + "decimals": [18], + "standardAccount": "*25519", + "website": ["http://laminar.network/"] + }, + { + "prefix": 10, + "network": "acala", + "displayName": "Acala", + "symbols": ["ACA"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://acala.network/" + }, + { + "prefix": 11, + "network": "laminar", + "displayName": "Laminar", + "symbols": ["LAMI"], + "decimals": [18], + "standardAccount": "*25519", + "website": ["http://laminar.network/"] + }, + { + "prefix": 12, + "network": "polymath", + "displayName": "Polymath", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": null + }, + { + "prefix": 13, + "network": "substratee", + "displayName": "SubstraTEE", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": "https://www.substratee.com" + }, + { + "prefix": 16, + "network": "kulupu", + "displayName": "Kulupu", + "symbols": ["KLP"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://kulupu.network/" + }, + { + "prefix": 17, + "network": "dark", + "displayName": "Dark Mainnet", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": null + }, + { + "prefix": 18, + "network": "darwinia", + "displayName": "Darwinia Chain", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": null + }, + { + "prefix": 20, + "network": "stafi", + "displayName": "Stafi", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": null + }, + { + "prefix": 21, + "network": "dock-testnet", + "displayName": "Dock Testnet", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": null + }, + { + "prefix": 22, + "network": "dock-mainnet", + 
"displayName": "Dock Mainnet", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": null + }, + { + "prefix": 23, + "network": "shift", + "displayName": "ShiftNrg", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": null + }, + { + "prefix": 28, + "network": "subsocial", + "displayName": "Subsocial", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": null + }, + { + "prefix": 30, + "network": "phala", + "displayName": "Phala Network", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": null + }, + { + "prefix": 32, + "network": "robonomics", + "displayName": "Robonomics Network", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": null + }, + { + "prefix": 33, + "network": "datahighway", + "displayName": "DataHighway", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": null + }, + { + "prefix": 36, + "network": "centrifuge", + "displayName": "Centrifuge Chain", + "symbols": ["RAD"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://centrifuge.io/" + }, + { + "prefix": 39, + "network": "mathchain", + "displayName": "MathChain mainnet", + "symbols": ["MATH"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://mathwallet.org" + }, + { + "prefix": 40, + "network": "mathchain-testnet", + "displayName": "MathChain testnet", + "symbols": ["MATH"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://mathwallet.org" + }, + { + "prefix": 42, + "network": "substrate", + "displayName": "Substrate", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": "https://substrate.dev/" + }, + { + "prefix": 43, + "network": "reserved43", + "displayName": "This prefix is reserved.", + "symbols": null, + "decimals": null, + "standardAccount": null, + "website": null + }, + { + "prefix": 44, + "network": "chainx", + "displayName": "ChainX", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": null + }, + { + "prefix": 46, + "network": "reserved46", + "displayName": "This prefix is reserved.", + "symbols": null, + "decimals": null, + "standardAccount": null, + "website": null + }, + { + "prefix": 47, + "network": "reserved47", + "displayName": "This prefix is reserved.", + "symbols": null, + "decimals": null, + "standardAccount": null, + "website": null + }, + { + "prefix": 48, + "network": "reserved48", + "displayName": "All prefixes 48 and higher are reserved and cannot be allocated.", + "symbols": null, + "decimals": null, + "standardAccount": null, + "website": null + } + ] +} diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index 3b2a3702430e77547585e3903a2bd63013b51732..ddadc2cb7177d8c2645059e4c71e85000e26cba1 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -1,20 +1,21 @@ [package] name = "substrate-test-utils" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +description = "Substrate test utilities" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = { version = "0.3.1", features = ["compat"] } -substrate-test-utils-derive = { path = "./derive" } +substrate-test-utils-derive = { version = "0.8.0", path = "./derive" } tokio = { version = "0.2.13", features = ["macros"] } 
[dev-dependencies] -sc-service = { path = "../client/service" } +sc-service = { version = "0.8.0", path = "../client/service" } trybuild = { version = "1.0", features = ["diff"] } diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 5e97be2e0b3d03e98a8a5d25eac6044d2d89146d..91602b0b27c4927469f792e98db9a37ccd8194f9 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-client" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -19,15 +19,15 @@ hash-db = "0.15.2" hex = "0.4" serde = "1.0.55" serde_json = "1.0.55" -sc-client-api = { version = "2.0.0-rc5", path = "../../client/api" } -sc-client-db = { version = "0.8.0-rc5", features = ["test-helpers"], path = "../../client/db" } -sc-consensus = { version = "0.8.0-rc5", path = "../../client/consensus/common" } -sc-executor = { version = "0.8.0-rc5", path = "../../client/executor" } -sc-light = { version = "2.0.0-rc5", path = "../../client/light" } -sc-service = { version = "0.8.0-rc5", default-features = false, features = ["test-helpers"], path = "../../client/service" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../primitives/blockchain" } -sp-consensus = { version = "0.8.0-rc5", path = "../../primitives/consensus/common" } -sp-core = { version = "2.0.0-rc5", path = "../../primitives/core" } -sp-keyring = { version = "2.0.0-rc5", path = "../../primitives/keyring" } -sp-runtime = { version = "2.0.0-rc5", path = "../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } +sc-client-api = { version = "2.0.0", path = "../../client/api" } +sc-client-db = { version = "0.8.0", features = ["test-helpers"], path = "../../client/db" } +sc-consensus = { version = "0.8.0", path = "../../client/consensus/common" } +sc-executor = { version = "0.8.0", path = "../../client/executor" } +sc-light = { version = "2.0.0", path = "../../client/light" } +sc-service = { version = "0.8.0", default-features = false, features = ["test-helpers"], path = "../../client/service" } +sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } +sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } diff --git a/test-utils/derive/Cargo.toml b/test-utils/derive/Cargo.toml index 5ec3e10108c032a8c013b6f991e6aa7c01896738..263bfd35373409634ae12554aa33172c00210a98 100644 --- a/test-utils/derive/Cargo.toml +++ b/test-utils/derive/Cargo.toml @@ -1,11 +1,12 @@ [package] name = "substrate-test-utils-derive" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +description = "Substrate test utilities macros" [dependencies] quote = "1.0.6" diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 08e1b955ab43c8bcc84eac06857c932f948b04ce..4e388861f1bab9325c1cb6d9a1421749c8596a7b 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-runtime" -version = "2.0.0-rc5" +version = "2.0.0" authors 
= ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -13,35 +13,37 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-aura = { version = "0.8.0-rc5", default-features = false, path = "../../primitives/consensus/aura" } -sp-consensus-babe = { version = "0.8.0-rc5", default-features = false, path = "../../primitives/consensus/babe" } -sp-block-builder = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/block-builder" } +sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } +sp-consensus-aura = { version = "0.8.0", default-features = false, path = "../../primitives/consensus/aura" } +sp-consensus-babe = { version = "0.8.0", default-features = false, path = "../../primitives/consensus/babe" } +sp-block-builder = { version = "2.0.0", default-features = false, path = "../../primitives/block-builder" } codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -frame-executive = { version = "2.0.0-rc5", default-features = false, path = "../../frame/executive" } -sp-inherents = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "2.0.0-rc5", optional = true, path = "../../primitives/keyring" } +frame-executive = { version = "2.0.0", default-features = false, path = "../../frame/executive" } +sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } +sp-keyring = { version = "2.0.0", optional = true, path = "../../primitives/keyring" } memory-db = { version = "0.24.0", default-features = false } -sp-offchain = { path = "../../primitives/offchain", default-features = false, version = "2.0.0-rc5"} -sp-core = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/std" } -sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false, version = "2.0.0-rc5"} -sp-io = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/io" } -frame-support = { version = "2.0.0-rc5", default-features = false, path = "../../frame/support" } -sp-version = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/version" } -sp-session = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/session" } -sp-api = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/api" } -sp-runtime = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/runtime" } -pallet-babe = { version = "2.0.0-rc5", default-features = false, path = "../../frame/babe" } -frame-system = { version = "2.0.0-rc5", default-features = false, path = "../../frame/system" } -frame-system-rpc-runtime-api = { version = "2.0.0-rc5", default-features = false, path = "../../frame/system/rpc/runtime-api" } -pallet-timestamp = { version = "2.0.0-rc5", default-features = false, path = "../../frame/timestamp" } -sp-finality-grandpa = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/finality-grandpa" } -sp-trie = { version = "2.0.0-rc5", default-features = false, path = "../../primitives/trie" } -sp-transaction-pool = { version = "2.0.0-rc5", default-features = false, path = 
"../../primitives/transaction-pool" } +sp-offchain = { path = "../../primitives/offchain", default-features = false, version = "2.0.0"} +sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false, version = "2.0.0"} +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0", default-features = false, path = "../../frame/support" } +sp-version = { version = "2.0.0", default-features = false, path = "../../primitives/version" } +sp-session = { version = "2.0.0", default-features = false, path = "../../primitives/session" } +sp-api = { version = "2.0.0", default-features = false, path = "../../primitives/api" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +pallet-babe = { version = "2.0.0", default-features = false, path = "../../frame/babe" } +frame-system = { version = "2.0.0", default-features = false, path = "../../frame/system" } +frame-system-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../frame/system/rpc/runtime-api" } +pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../frame/timestamp" } +sp-finality-grandpa = { version = "2.0.0", default-features = false, path = "../../primitives/finality-grandpa" } +sp-trie = { version = "2.0.0", default-features = false, path = "../../primitives/trie" } +sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../primitives/transaction-pool" } trie-db = { version = "0.22.0", default-features = false } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } -sc-service = { version = "0.8.0-rc5", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } +sc-service = { version = "0.8.0", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } +sp-state-machine = { version = "0.8.0", default-features = false, path = "../../primitives/state-machine" } +sp-externalities = { version = "0.8.0", default-features = false, path = "../../primitives/externalities" } # 3rd party cfg-if = "0.1.10" @@ -49,10 +51,9 @@ log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } [dev-dependencies] -sc-block-builder = { version = "0.8.0-rc5", path = "../../client/block-builder" } -sc-executor = { version = "0.8.0-rc5", path = "../../client/executor" } -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "./client" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../primitives/state-machine" } +sc-block-builder = { version = "0.8.0", path = "../../client/block-builder" } +sc-executor = { version = "0.8.0", path = "../../client/executor" } +substrate-test-runtime-client = { version = "2.0.0", path = "./client" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../utils/wasm-builder-runner" } @@ -84,6 +85,8 @@ std = [ "sp-session/std", "sp-api/std", "sp-runtime/std", + "sp-externalities/std", + "sp-state-machine/std", "pallet-babe/std", "frame-system-rpc-runtime-api/std", "frame-system/std", diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml 
index c67ceb720100c43c9b4fc0a3fe16212fc9ac2ce0..b310bbe7a709a9aeed76ccc6ac83ccd4663d6bb8 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-runtime-client" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,17 +12,17 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-light = { version = "2.0.0-rc5", path = "../../../client/light" } -sp-consensus = { version = "0.8.0-rc5", path = "../../../primitives/consensus/common" } -sc-block-builder = { version = "0.8.0-rc5", path = "../../../client/block-builder" } -substrate-test-client = { version = "2.0.0-rc5", path = "../../client" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -substrate-test-runtime = { version = "2.0.0-rc5", path = "../../runtime" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../../primitives/api" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } +sc-light = { version = "2.0.0", path = "../../../client/light" } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sc-block-builder = { version = "0.8.0", path = "../../../client/block-builder" } +substrate-test-client = { version = "2.0.0", path = "../../client" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +substrate-test-runtime = { version = "2.0.0", path = "../../runtime" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0", path = "../../../primitives/api" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } codec = { package = "parity-scale-codec", version = "1.3.1" } -sc-client-api = { version = "2.0.0-rc5", path = "../../../client/api" } -sc-consensus = { version = "0.8.0-rc5", path = "../../../client/consensus/common" } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../../client/service" } +sc-client-api = { version = "2.0.0", path = "../../../client/api" } +sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } +sc-service = { version = "0.8.0", default-features = false, path = "../../../client/service" } futures = "0.3.4" diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index fedbff5a109ba1b90bd936e62a09025081ca2921..5ab4d99dee0ad4d4062d5c4f55481ca56e0524e6 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -29,7 +29,7 @@ use codec::{Encode, Decode, Input, Error}; use sp_core::{offchain::KeyTypeId, ChangesTrieConfiguration, OpaqueMetadata, RuntimeDebug}; use sp_application_crypto::{ed25519, sr25519, ecdsa, RuntimeAppPublic}; use trie_db::{TrieMut, Trie}; -use sp_trie::PrefixedMemoryDB; +use sp_trie::{PrefixedMemoryDB, StorageProof}; use sp_trie::trie_types::{TrieDB, TrieDBMut}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; @@ -59,6 +59,7 @@ use cfg_if::cfg_if; // Ensure Babe and Aura use the same crypto to simplify things a bit. pub use sp_consensus_babe::{AuthorityId, SlotNumber, AllowedSlots}; + pub type AuraId = sp_consensus_aura::sr25519::AuthorityId; // Include the WASM binary @@ -334,6 +335,8 @@ cfg_if! { fn test_ecdsa_crypto() -> (ecdsa::AppSignature, ecdsa::AppPublic); /// Run various tests against storage. fn test_storage(); + /// Check a witness. 
+ fn test_witness(proof: StorageProof, root: crate::Hash); /// Test that ensures that we can call a function that takes multiple /// arguments. fn test_multiple_arguments(data: Vec, other: Vec, num: u32); @@ -383,6 +386,8 @@ cfg_if! { fn test_ecdsa_crypto() -> (ecdsa::AppSignature, ecdsa::AppPublic); /// Run various tests against storage. fn test_storage(); + /// Check a witness. + fn test_witness(proof: StorageProof, root: crate::Hash); /// Test that ensures that we can call a function that takes multiple /// arguments. fn test_multiple_arguments(data: Vec, other: Vec, num: u32); @@ -448,7 +453,7 @@ impl frame_system::Trait for Runtime { type MaximumBlockLength = MaximumBlockLength; type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type ModuleToIndex = (); + type PalletInfo = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); @@ -487,6 +492,8 @@ impl pallet_babe::Trait for Runtime { )>>::IdentificationTuple; type HandleEquivocation = (); + + type WeightInfo = (); } /// Adds one to the given input and returns the final result. @@ -683,6 +690,10 @@ cfg_if! { test_read_child_storage(); } + fn test_witness(proof: StorageProof, root: crate::Hash) { + test_witness(proof, root); + } + fn test_multiple_arguments(data: Vec, other: Vec, num: u32) { assert_eq!(&data[..], &other[..]); assert_eq!(data.len(), num as usize); @@ -925,6 +936,10 @@ cfg_if! { test_read_child_storage(); } + fn test_witness(proof: StorageProof, root: crate::Hash) { + test_witness(proof, root); + } + fn test_multiple_arguments(data: Vec, other: Vec, num: u32) { assert_eq!(&data[..], &other[..]); assert_eq!(data.len(), num as usize); @@ -1054,17 +1069,13 @@ fn test_read_storage() { sp_io::storage::set(KEY, b"test"); let mut v = [0u8; 4]; - let r = sp_io::storage::read( - KEY, - &mut v, - 0 - ); + let r = sp_io::storage::read(KEY, &mut v, 0); assert_eq!(r, Some(4)); assert_eq!(&v, b"test"); let mut v = [0u8; 4]; - let r = sp_io::storage::read(KEY, &mut v, 8); - assert_eq!(r, Some(4)); + let r = sp_io::storage::read(KEY, &mut v, 4); + assert_eq!(r, Some(0)); assert_eq!(&v, &[0, 0, 0, 0]); } @@ -1094,10 +1105,38 @@ fn test_read_child_storage() { &mut v, 8, ); - assert_eq!(r, Some(4)); + assert_eq!(r, Some(0)); assert_eq!(&v, &[0, 0, 0, 0]); } +fn test_witness(proof: StorageProof, root: crate::Hash) { + use sp_externalities::Externalities; + let db: sp_trie::MemoryDB = proof.into_memory_db(); + let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new( + db, + root, + ); + let mut overlay = sp_state_machine::OverlayedChanges::default(); + #[cfg(feature = "std")] + let mut offchain_overlay = Default::default(); + let mut cache = sp_state_machine::StorageTransactionCache::<_, _, BlockNumber>::default(); + let mut ext = sp_state_machine::Ext::new( + &mut overlay, + #[cfg(feature = "std")] + &mut offchain_overlay, + &mut cache, + &backend, + #[cfg(feature = "std")] + None, + #[cfg(feature = "std")] + None, + ); + assert!(ext.storage(b"value3").is_some()); + assert!(ext.storage_root().as_slice() == &root[..]); + ext.place_storage(vec![0], Some(vec![1])); + assert!(ext.storage_root().as_slice() != &root[..]); +} + #[cfg(test)] mod tests { use substrate_test_runtime_client::{ @@ -1156,4 +1195,33 @@ mod tests { runtime_api.test_storage(&block_id).unwrap(); } + + fn witness_backend() -> (sp_trie::MemoryDB, crate::Hash) { + use sp_trie::TrieMut; + let mut root = crate::Hash::default(); + let mut mdb = sp_trie::MemoryDB::::default(); + { + let mut trie = 
sp_trie::trie_types::TrieDBMut::new(&mut mdb, &mut root); + trie.insert(b"value3", &[142]).expect("insert failed"); + trie.insert(b"value4", &[124]).expect("insert failed"); + }; + (mdb, root) + } + + #[test] + fn witness_backend_works() { + let (db, root) = witness_backend(); + let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new( + db, + root, + ); + let proof = sp_state_machine::prove_read(backend, vec![b"value3"]).unwrap(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::Both) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); + + runtime_api.test_witness(&block_id, proof, root).unwrap(); + } } diff --git a/test-utils/runtime/transaction-pool/Cargo.toml b/test-utils/runtime/transaction-pool/Cargo.toml index da4d2d592de4afa959a5ce0ba0e985616abba340..a37477fdae58fb6c1161f0dde41d4650fc44b37c 100644 --- a/test-utils/runtime/transaction-pool/Cargo.toml +++ b/test-utils/runtime/transaction-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-runtime-transaction-pool" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,12 +12,12 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../client" } +substrate-test-runtime-client = { version = "2.0.0", path = "../client" } parking_lot = "0.10.0" codec = { package = "parity-scale-codec", version = "1.3.1" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../primitives/transaction-pool" } -sc-transaction-graph = { version = "2.0.0-rc5", path = "../../../client/transaction-pool/graph" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } +sc-transaction-graph = { version = "2.0.0", path = "../../../client/transaction-pool/graph" } futures = { version = "0.3.1", features = ["compat"] } derive_more = "0.99.2" diff --git a/test-utils/test-crate/Cargo.toml b/test-utils/test-crate/Cargo.toml index 6d16edde12c594a0dba387499a38263ffafd0330..4e1273b25c9934bf302f47bcb7a97f22e7e995c9 100644 --- a/test-utils/test-crate/Cargo.toml +++ b/test-utils/test-crate/Cargo.toml @@ -6,11 +6,12 @@ edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] tokio = { version = "0.2.13", features = ["macros"] } -test-utils = { path = "..", package = "substrate-test-utils" } -sc-service = { path = "../../client/service" } +test-utils = { version = "2.0.0", path = "..", package = "substrate-test-utils" } +sc-service = { version = "0.8.0", path = "../../client/service" } diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml index 20e04148fafc1fc1b73d93746565b300173bf01d..06e626ef65ffa233ee1461432949e8ae6b67dfff 100644 --- a/utils/browser/Cargo.toml +++ b/utils/browser/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "substrate-browser-utils" -version = "0.8.0-rc5" +version = "0.8.0" authors = ["Parity Technologies "] description = "Utilities for 
creating a browser light-client." edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,18 +16,18 @@ targets = ["x86_64-unknown-linux-gnu"] futures = { version = "0.3", features = ["compat"] } futures01 = { package = "futures", version = "0.1.29" } log = "0.4.8" -libp2p-wasm-ext = { version = "0.20", features = ["websocket"] } +libp2p-wasm-ext = { version = "0.22", features = ["websocket"] } console_error_panic_hook = "0.1.6" console_log = "0.1.2" js-sys = "0.3.34" wasm-bindgen = "0.2.57" wasm-bindgen-futures = "0.4.7" kvdb-web = "0.7" -sp-database = { version = "2.0.0-rc5", path = "../../primitives/database" } -sc-informant = { version = "0.8.0-rc5", path = "../../client/informant" } -sc-service = { version = "0.8.0-rc5", path = "../../client/service", default-features = false } -sc-network = { path = "../../client/network", version = "0.8.0-rc5"} -sc-chain-spec = { path = "../../client/chain-spec", version = "2.0.0-rc5"} +sp-database = { version = "2.0.0", path = "../../primitives/database" } +sc-informant = { version = "0.8.0", path = "../../client/informant" } +sc-service = { version = "0.8.0", path = "../../client/service", default-features = false } +sc-network = { path = "../../client/network", version = "0.8.0"} +sc-chain-spec = { path = "../../client/chain-spec", version = "2.0.0"} # Imported just for the `wasm-bindgen` feature rand6 = { package = "rand", version = "0.6", features = ["wasm-bindgen"] } diff --git a/utils/browser/README.md b/utils/browser/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9718db58b37e9ae1c81b5b627fe27e51129cf418 --- /dev/null +++ b/utils/browser/README.md @@ -0,0 +1 @@ +License: Apache-2.0 \ No newline at end of file diff --git a/utils/build-script-utils/Cargo.toml b/utils/build-script-utils/Cargo.toml index ecf15503c54825488f44cd8de5bd1d7b8c078dfb..30c8a4c52b6579b36cd9194fe61ba04cfda98269 100644 --- a/utils/build-script-utils/Cargo.toml +++ b/utils/build-script-utils/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "substrate-build-script-utils" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Crate with utility functions for `build.rs` scripts." +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/utils/build-script-utils/README.md b/utils/build-script-utils/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1c184f67326e3c3170b1a92767deffbdcbfa9c70 --- /dev/null +++ b/utils/build-script-utils/README.md @@ -0,0 +1,3 @@ +Crate with utility functions for `build.rs` scripts. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/utils/fork-tree/Cargo.toml b/utils/fork-tree/Cargo.toml index 30a38545980101067cc4c9681245fcb62bcf48a9..23662722a1f6c0518db13f7d8e1c892e0a6535fa 100644 --- a/utils/fork-tree/Cargo.toml +++ b/utils/fork-tree/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fork-tree" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,6 +8,7 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Utility library for managing tree-like ordered data with logic for pruning the tree while finalizing nodes." documentation = "https://docs.rs/fork-tree" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/utils/fork-tree/README.md b/utils/fork-tree/README.md new file mode 100644 index 0000000000000000000000000000000000000000..fef7db57f68f21ed590bc6df8e3a3444dc544476 --- /dev/null +++ b/utils/fork-tree/README.md @@ -0,0 +1,4 @@ +Utility library for managing tree-like ordered data with logic for pruning +the tree while finalizing nodes. + +License: Apache-2.0 \ No newline at end of file diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index c34404575e5fc7be4780735456368a02d1125703..ebaab2b1c58b57b14260dc465d91dd9fab745314 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -1,26 +1,27 @@ [package] name = "frame-benchmarking-cli" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "CLI for benchmarking FRAME" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -frame-benchmarking = { version = "2.0.0-rc5", path = "../../../frame/benchmarking" } -sp-core = { version = "2.0.0-rc5", path = "../../../primitives/core" } -sc-service = { version = "0.8.0-rc5", default-features = false, path = "../../../client/service" } -sc-cli = { version = "0.8.0-rc5", path = "../../../client/cli" } -sc-client-db = { version = "0.8.0-rc5", path = "../../../client/db" } -sc-executor = { version = "0.8.0-rc5", path = "../../../client/executor" } -sp-externalities = { version = "0.8.0-rc5", path = "../../../primitives/externalities" } -sp-runtime = { version = "2.0.0-rc5", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-rc5", path = "../../../primitives/state-machine" } +frame-benchmarking = { version = "2.0.0", path = "../../../frame/benchmarking" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sc-service = { version = "0.8.0", default-features = false, path = "../../../client/service" } +sc-cli = { version = "0.8.0", path = "../../../client/cli" } +sc-client-db = { version = "0.8.0", path = "../../../client/db" } +sc-executor = { version = "0.8.0", path = "../../../client/executor" } +sp-externalities = { version = "0.8.0", path = "../../../primitives/externalities" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } structopt = "0.3.8" codec = { version = "1.3.1", package = "parity-scale-codec" } diff --git a/utils/frame/benchmarking-cli/README.md b/utils/frame/benchmarking-cli/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..9718db58b37e9ae1c81b5b627fe27e51129cf418 --- /dev/null +++ b/utils/frame/benchmarking-cli/README.md @@ -0,0 +1 @@ +License: Apache-2.0 \ No newline at end of file diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs index 688e393bd605a5fc712df065b6719495fbde9ec6..b0791f88ce5f60e1123242a702874af6dbed0aef 100644 --- a/utils/frame/benchmarking-cli/src/command.rs +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -75,6 +75,7 @@ impl BenchmarkCmd { self.highest_range_values.clone(), self.steps.clone(), self.repeat, + !self.no_verify, self.extra, ).encode(), extensions, diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs index c2a228fc86aefeb8817f77979be8c8e14f1e3952..8cbb3c786876c6ecb1dd93b9fed94242cf4b3b66 100644 --- a/utils/frame/benchmarking-cli/src/lib.rs +++ b/utils/frame/benchmarking-cli/src/lib.rs @@ -72,6 +72,10 @@ pub struct BenchmarkCmd { #[structopt(long)] pub heap_pages: Option, + /// Disable verification logic when running benchmarks. + #[structopt(long)] + pub no_verify: bool, + /// Display and run extra benchmarks that would otherwise not be needed for weight construction. #[structopt(long)] pub extra: bool, diff --git a/utils/frame/benchmarking-cli/src/writer.rs b/utils/frame/benchmarking-cli/src/writer.rs index 2bc17aa85bddbdb6ea953f1cfa734fa2296adb6f..2f5fd92ff01f064d34128ac7daa55867f6632b78 100644 --- a/utils/frame/benchmarking-cli/src/writer.rs +++ b/utils/frame/benchmarking-cli/src/writer.rs @@ -32,6 +32,21 @@ pub fn open_file(path: &str) -> Result { .open(path) } +fn underscore(i: Number) -> String + where Number: std::string::ToString +{ + let mut s = String::new(); + let i_str = i.to_string(); + let a = i_str.chars().rev().enumerate(); + for (idx, val) in a { + if idx != 0 && idx % 3 == 0 { + s.insert(0, '_'); + } + s.insert(0, val); + } + s +} + pub fn write_trait(file: &mut File, batches: Vec) -> Result<(), std::io::Error> { let mut current_pallet = Vec::::new(); @@ -107,6 +122,12 @@ pub fn write_results(batches: &[BenchmarkBatch]) -> Result<(), std::io::Error> { VERSION, )?; + // allow statements + write!( + file, + "#![allow(unused_parens)]\n#![allow(unused_imports)]\n\n", + )?; + // general imports write!( file, @@ -170,10 +191,10 @@ pub fn write_results(batches: &[BenchmarkBatch]) -> Result<(), std::io::Error> { // return value write!(file, ") -> Weight {{\n")?; - write!(file, "\t\t({} as Weight)\n", extrinsic_time.base.saturating_mul(1000))?; + write!(file, "\t\t({} as Weight)\n", underscore(extrinsic_time.base.saturating_mul(1000)))?; used_extrinsic_time.iter().try_for_each(|(slope, name)| -> Result<(), std::io::Error> { write!(file, "\t\t\t.saturating_add(({} as Weight).saturating_mul({} as Weight))\n", - slope.saturating_mul(1000), + underscore(slope.saturating_mul(1000)), name, ) })?; diff --git a/utils/frame/frame-utilities-cli/Cargo.toml b/utils/frame/frame-utilities-cli/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..0e39f35512541ff0fc0dcfdb91199fb841d03875 --- /dev/null +++ b/utils/frame/frame-utilities-cli/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "substrate-frame-cli" +version = "2.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "cli interface for FRAME" +documentation = "https://docs.rs/substrate-frame-cli" +readme = "README.md" + 
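The `underscore` helper added to the benchmark writer groups digits with `_` so the weight constants it emits stay readable (the writer multiplies extrinsic times and slopes by 1000 before printing them). Below is a self-contained sketch of the same digit-grouping logic, with the generic parameter written out and a couple of example outputs.

```rust
/// Insert an underscore after every third digit from the right,
/// e.g. 1000000 -> "1_000_000", mirroring the writer's `underscore` helper.
fn underscore<Number: ToString>(i: Number) -> String {
	let mut s = String::new();
	for (idx, val) in i.to_string().chars().rev().enumerate() {
		if idx != 0 && idx % 3 == 0 {
			s.insert(0, '_');
		}
		s.insert(0, val);
	}
	s
}

fn main() {
	assert_eq!(underscore(950u64), "950");
	assert_eq!(underscore(1_000u64), "1_000");
	// After the writer's x1000 scaling, typical generated weights look like this:
	assert_eq!(underscore(22_060_000u64), "22_060_000");
	println!("{}", underscore(1_234_567_890u64)); // 1_234_567_890
}
```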
+[dependencies] +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sc-cli = { version = "0.8.0", path = "../../../client/cli" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +structopt = "0.3.8" +frame-system = { version = "2.0.0", path = "../../../frame/system" } + +[dev-dependencies] + +[features] +default = [] diff --git a/utils/frame/frame-utilities-cli/README.md b/utils/frame/frame-utilities-cli/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b1e4f869af7580687e663c5bf2e3c89a1badd35a --- /dev/null +++ b/utils/frame/frame-utilities-cli/README.md @@ -0,0 +1,3 @@ +frame-system CLI utilities + +License: Apache-2.0 \ No newline at end of file diff --git a/utils/frame/frame-utilities-cli/src/lib.rs b/utils/frame/frame-utilities-cli/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..872cfc99a63dc13670c2f5ec2f1a66e300634e1d --- /dev/null +++ b/utils/frame/frame-utilities-cli/src/lib.rs @@ -0,0 +1,23 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! frame-system CLI utilities + +mod module_id; + +pub use module_id::ModuleIdCmd; + diff --git a/utils/frame/frame-utilities-cli/src/module_id.rs b/utils/frame/frame-utilities-cli/src/module_id.rs new file mode 100644 index 0000000000000000000000000000000000000000..cc76c70d0fa8e511c889377840202c403da26106 --- /dev/null +++ b/utils/frame/frame-utilities-cli/src/module_id.rs @@ -0,0 +1,94 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Implementation of the `moduleid` subcommand + +use sc_cli::{ + Error, utils::print_from_uri, CryptoSchemeFlag, + OutputTypeFlag, KeystoreParams, with_crypto_scheme, +}; +use sp_runtime::ModuleId; +use sp_runtime::traits::AccountIdConversion; +use sp_core::crypto::{Ss58Codec, Ss58AddressFormat}; +use std::convert::{TryInto, TryFrom}; +use structopt::StructOpt; + +/// The `moduleid` command +#[derive(Debug, StructOpt)] +#[structopt( + name = "moduleid", + about = "Inspect a module ID address" +)] +pub struct ModuleIdCmd { + /// The module ID used to derive the account + id: String, + + /// network address format + #[structopt( + long, + value_name = "NETWORK", + possible_values = &Ss58AddressFormat::all_names()[..], + parse(try_from_str = Ss58AddressFormat::try_from), + case_insensitive = true, + )] + pub network: Option, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub output_scheme: OutputTypeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub crypto_scheme: CryptoSchemeFlag, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub keystore_params: KeystoreParams, +} + +impl ModuleIdCmd { + /// runs the command + pub fn run(&self) -> Result<(), Error> + where + R: frame_system::Trait, + R::AccountId: Ss58Codec, + { + if self.id.len() != 8 { + Err("a module id must be a string of 8 characters")? + } + let password = self.keystore_params.read_password()?; + + let id_fixed_array: [u8; 8] = self.id.as_bytes() + .try_into() + .map_err(|_| "Cannot convert argument to moduleid: argument should be 8-character string")?; + + let account_id: R::AccountId = ModuleId(id_fixed_array).into_account(); + + with_crypto_scheme!( + self.crypto_scheme.scheme, + print_from_uri( + &account_id.to_ss58check_with_version(self.network.clone().unwrap_or_default()), + password, + self.network, + self.output_scheme.output_type.clone() + ) + ); + + Ok(()) + } +} + diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index d5f34fe2b2d0eabd05549a5f42dd5450f101111d..2541ed0cf655f5ff574832fa77a8a53f540ef289 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-frame-rpc-support" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies ", "Andrew Dirksen "] edition = "2018" license = "Apache-2.0" @@ -13,14 +13,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = { version = "0.3.0", features = ["compat"] } -jsonrpc-client-transports = { version = "14.2.0", default-features = false, features = ["http"] } -jsonrpc-core = "14.2.0" +jsonrpc-client-transports = { version = "15.0.0", default-features = false, features = ["http"] } +jsonrpc-core = "15.0.0" codec = { package = "parity-scale-codec", version = "1.3.1" } serde = "1" -frame-support = { version = "2.0.0-rc5", path = "../../../../frame/support" } -sp-storage = { version = "2.0.0-rc5", path = "../../../../primitives/storage" } -sc-rpc-api = { version = "0.8.0-rc5", path = "../../../../client/rpc-api" } +frame-support = { version = "2.0.0", path = "../../../../frame/support" } +sp-storage = { version = "2.0.0", path = "../../../../primitives/storage" } +sc-rpc-api = { version = "0.8.0", path = "../../../../client/rpc-api" } [dev-dependencies] -frame-system = { version = "2.0.0-rc5", path = "../../../../frame/system" } +frame-system = { version = "2.0.0", path = "../../../../frame/system" } tokio = "0.2" diff --git a/utils/frame/rpc/support/README.md b/utils/frame/rpc/support/README.md new file mode 100644 index 
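`ModuleIdCmd` turns an 8-byte module ID into the account that module controls via `AccountIdConversion::into_account`, then prints it in SS58 form. Below is a minimal sketch of that derivation outside the CLI; `AccountId32` stands in for the runtime's account type and `py/trsry` is only an example ID, not something the command requires.

```rust
use sp_core::crypto::{AccountId32, Ss58Codec};
use sp_runtime::{traits::AccountIdConversion, ModuleId};

fn main() {
	// A module ID is exactly 8 bytes; `ModuleIdCmd::run` rejects anything else.
	let module_id = ModuleId(*b"py/trsry");

	// Derive the controlled account, as the command does with the runtime's AccountId type.
	let account: AccountId32 = module_id.into_account();

	// Print the SS58 form; the CLI additionally applies `--network` and the output scheme flags.
	println!("{}", account.to_ss58check());
}
```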
0000000000000000000000000000000000000000..ca5750612931ce385a2251b86a350afca5dd97da --- /dev/null +++ b/utils/frame/rpc/support/README.md @@ -0,0 +1,4 @@ +Combines [sc_rpc_api::state::StateClient] with [frame_support::storage::generator] traits +to provide strongly typed chain state queries over rpc. + +License: Apache-2.0 \ No newline at end of file diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index 33a949fddd07082ed2b27de941c0bba851324808..515ff93251522025fbb7c695663337b33d81f302 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -1,35 +1,36 @@ [package] name = "substrate-frame-rpc-system" -version = "2.0.0-rc5" +version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME's system exposed over Substrate RPC" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0-rc5", path = "../../../../client/api" } +sc-client-api = { version = "2.0.0", path = "../../../../client/api" } codec = { package = "parity-scale-codec", version = "1.3.1" } futures = { version = "0.3.4", features = ["compat"] } -jsonrpc-core = "14.2.0" -jsonrpc-core-client = "14.2.0" -jsonrpc-derive = "14.2.1" +jsonrpc-core = "15.0.0" +jsonrpc-core-client = "15.0.0" +jsonrpc-derive = "15.0.0" log = "0.4.8" serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0-rc5", path = "../../../../primitives/runtime" } -sp-api = { version = "2.0.0-rc5", path = "../../../../primitives/api" } -frame-system-rpc-runtime-api = { version = "2.0.0-rc5", path = "../../../../frame/system/rpc/runtime-api" } -sp-core = { version = "2.0.0-rc5", path = "../../../../primitives/core" } -sp-blockchain = { version = "2.0.0-rc5", path = "../../../../primitives/blockchain" } -sp-transaction-pool = { version = "2.0.0-rc5", path = "../../../../primitives/transaction-pool" } -sp-block-builder = { version = "2.0.0-rc5", path = "../../../../primitives/block-builder" } -sc-rpc-api = { version = "0.8.0-rc5", path = "../../../../client/rpc-api" } +sp-runtime = { version = "2.0.0", path = "../../../../primitives/runtime" } +sp-api = { version = "2.0.0", path = "../../../../primitives/api" } +frame-system-rpc-runtime-api = { version = "2.0.0", path = "../../../../frame/system/rpc/runtime-api" } +sp-core = { version = "2.0.0", path = "../../../../primitives/core" } +sp-blockchain = { version = "2.0.0", path = "../../../../primitives/blockchain" } +sp-transaction-pool = { version = "2.0.0", path = "../../../../primitives/transaction-pool" } +sp-block-builder = { version = "2.0.0", path = "../../../../primitives/block-builder" } +sc-rpc-api = { version = "0.8.0", path = "../../../../client/rpc-api" } [dev-dependencies] -substrate-test-runtime-client = { version = "2.0.0-rc5", path = "../../../../test-utils/runtime/client" } -env_logger = "0.7.0" -sc-transaction-pool = { version = "2.0.0-rc5", path = "../../../../client/transaction-pool" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" } +sp-tracing = { version = "2.0.0", path = "../../../../primitives/tracing" } +sc-transaction-pool = { version = "2.0.0", path = "../../../../client/transaction-pool" } diff --git a/utils/frame/rpc/system/README.md b/utils/frame/rpc/system/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..38986983d93c5b3794f93c56d50840de9a4ead18 --- /dev/null +++ b/utils/frame/rpc/system/README.md @@ -0,0 +1,3 @@ +System FRAME specific RPC methods. + +License: Apache-2.0 \ No newline at end of file diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index 2bb46369fea544437d086f36144770644baf568d..cefe39534a16723de7bf0fd7e9d41c0075b7b695 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -294,7 +294,7 @@ mod tests { #[test] fn should_return_next_nonce_for_some_account() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); // given let client = Arc::new(substrate_test_runtime_client::new()); @@ -333,7 +333,7 @@ mod tests { #[test] fn dry_run_should_deny_unsafe() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); // given let client = Arc::new(substrate_test_runtime_client::new()); @@ -356,7 +356,7 @@ mod tests { #[test] fn dry_run_should_work() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); // given let client = Arc::new(substrate_test_runtime_client::new()); @@ -388,7 +388,7 @@ mod tests { #[test] fn dry_run_should_indicate_error() { - let _ = env_logger::try_init(); + sp_tracing::try_init_simple(); // given let client = Arc::new(substrate_test_runtime_client::new()); diff --git a/utils/prometheus/Cargo.toml b/utils/prometheus/Cargo.toml index 805ea19cdc61d5507e51508fd8251513774be750..8fdbbc6124f58be1d67ae6e3811aebf61ef0b227 100644 --- a/utils/prometheus/Cargo.toml +++ b/utils/prometheus/Cargo.toml @@ -1,19 +1,20 @@ [package] description = "Endpoint to expose Prometheus metrics" name = "substrate-prometheus-endpoint" -version = "0.8.0-rc5" +version = "0.8.0" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" -prometheus = { version = "0.9", default-features = false } +prometheus = { version = "0.10.0", default-features = false } futures-util = { version = "0.3.1", default-features = false, features = ["io"] } derive_more = "0.99" diff --git a/utils/prometheus/src/lib.rs b/utils/prometheus/src/lib.rs index 9030704cb746ffbb4fa9ebfe5f468751a67a28d2..be7050a8a07369ccf83db3a572bcec77e8a67521 100644 --- a/utils/prometheus/src/lib.rs +++ b/utils/prometheus/src/lib.rs @@ -31,6 +31,9 @@ use std::net::SocketAddr; #[cfg(not(target_os = "unknown"))] mod networking; +mod sourced; + +pub use sourced::{SourcedCounter, SourcedGauge, MetricSource}; #[cfg(target_os = "unknown")] pub use unknown_os::init_prometheus; diff --git a/utils/prometheus/src/sourced.rs b/utils/prometheus/src/sourced.rs new file mode 100644 index 0000000000000000000000000000000000000000..58f60e4969bb8c8bd79b0a1c8c0ab6109feef83c --- /dev/null +++ b/utils/prometheus/src/sourced.rs @@ -0,0 +1,143 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Metrics that are collected from existing sources. + +use prometheus::core::{Collector, Desc, Describer, Number, Opts}; +use prometheus::proto; +use std::{cmp::Ordering, marker::PhantomData}; + +/// A counter whose values are obtained from an existing source. +/// +/// > **Note*: The counter values provided by the source `S` +/// > must be monotonically increasing. Otherwise use a +/// > [`SourcedGauge`] instead. +pub type SourcedCounter = SourcedMetric; + +/// A gauge whose values are obtained from an existing source. +pub type SourcedGauge = SourcedMetric; + +/// The type of a sourced counter. +#[derive(Copy, Clone)] +pub enum Counter {} + +/// The type of a sourced gauge. +#[derive(Copy, Clone)] +pub enum Gauge {} + +/// A metric whose values are obtained from an existing source, +/// instead of being independently recorded. +#[derive(Debug, Clone)] +pub struct SourcedMetric { + source: S, + desc: Desc, + _type: PhantomData, +} + +/// A source of values for a [`SourcedMetric`]. +pub trait MetricSource: Sync + Send + Clone { + /// The type of the collected values. + type N: Number; + /// Collects the current values of the metrics from the source. + fn collect(&self, set: impl FnMut(&[&str], Self::N)); +} + +impl SourcedMetric { + /// Creates a new metric that obtains its values from the given source. + pub fn new(opts: &Opts, source: S) -> prometheus::Result { + let desc = opts.describe()?; + Ok(Self { source, desc, _type: PhantomData }) + } +} + +impl Collector for SourcedMetric { + fn desc(&self) -> Vec<&Desc> { + vec![&self.desc] + } + + fn collect(&self) -> Vec { + let mut counters = Vec::new(); + + self.source.collect(|label_values, value| { + let mut m = proto::Metric::default(); + + match T::proto() { + proto::MetricType::COUNTER => { + let mut c = proto::Counter::default(); + c.set_value(value.into_f64()); + m.set_counter(c); + } + proto::MetricType::GAUGE => { + let mut g = proto::Gauge::default(); + g.set_value(value.into_f64()); + m.set_gauge(g); + } + t => { + log::error!("Unsupported sourced metric type: {:?}", t); + } + } + + debug_assert_eq!(self.desc.variable_labels.len(), label_values.len()); + match self.desc.variable_labels.len().cmp(&label_values.len()) { + Ordering::Greater => + log::warn!("Missing label values for sourced metric {}", self.desc.fq_name), + Ordering::Less => + log::warn!("Too many label values for sourced metric {}", self.desc.fq_name), + Ordering::Equal => {} + } + + m.set_label(self.desc.variable_labels.iter().zip(label_values) + .map(|(l_name, l_value)| { + let mut l = proto::LabelPair::default(); + l.set_name(l_name.to_string()); + l.set_value(l_value.to_string()); + l + }) + .chain(self.desc.const_label_pairs.iter().cloned()) + .collect::>()); + + counters.push(m); + }); + + let mut m = proto::MetricFamily::default(); + m.set_name(self.desc.fq_name.clone()); + m.set_help(self.desc.help.clone()); + m.set_field_type(T::proto()); + m.set_metric(counters); + + vec![m] + } +} + +/// Types of metrics that can obtain their values from an existing source. 
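A sourced metric stores no value of its own: on every scrape the `Collector` implementation above calls back into the `MetricSource` to read the current numbers. The sketch below shows one way this could be wired up; the `PendingTransactions` handle is hypothetical, and it assumes the crate's re-exported `MetricSource`/`SourcedGauge` plus a direct dependency on the `prometheus` crate for `Opts` and `Registry`.

```rust
use std::sync::{Arc, atomic::{AtomicU64, Ordering}};

use prometheus::{Opts, Registry};
use substrate_prometheus_endpoint::{MetricSource, SourcedGauge};

/// Hypothetical shared state that already exists elsewhere in the node.
#[derive(Clone)]
struct PendingTransactions(Arc<AtomicU64>);

impl MetricSource for PendingTransactions {
	type N = u64;

	fn collect(&self, mut set: impl FnMut(&[&str], Self::N)) {
		// No label values: the metric is declared without variable labels.
		set(&[], self.0.load(Ordering::Relaxed));
	}
}

fn main() -> prometheus::Result<()> {
	let pending = PendingTransactions(Arc::new(AtomicU64::new(0)));

	// The gauge samples the source at scrape time; no explicit `set` calls are needed.
	let gauge = SourcedGauge::new(
		&Opts::new("node_pending_transactions", "Transactions waiting in the pool"),
		pending.clone(),
	)?;

	let registry = Registry::new();
	registry.register(Box::new(gauge))?;

	pending.0.store(42, Ordering::Relaxed);
	Ok(())
}
```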
+pub trait SourcedType: private::Sealed + Sync + Send { + #[doc(hidden)] + fn proto() -> proto::MetricType; +} + +impl SourcedType for Counter { + fn proto() -> proto::MetricType { proto::MetricType::COUNTER } +} + +impl SourcedType for Gauge { + fn proto() -> proto::MetricType { proto::MetricType::GAUGE } +} + +mod private { + pub trait Sealed {} + impl Sealed for super::Counter {} + impl Sealed for super::Gauge {} +} diff --git a/utils/wasm-builder-runner/src/lib.rs b/utils/wasm-builder-runner/src/lib.rs index 7990ea2bb97d126fe8ac6065690db2b482a213f7..b0b1ac479e791f074d4368c4e5c6f04b86cca34d 100644 --- a/utils/wasm-builder-runner/src/lib.rs +++ b/utils/wasm-builder-runner/src/lib.rs @@ -44,7 +44,7 @@ const SKIP_BUILD_ENV: &str = "SKIP_WASM_BUILD"; const DUMMY_WASM_BINARY_ENV: &str = "BUILD_DUMMY_WASM_BINARY"; /// Environment variable that makes sure the WASM build is triggered. -const TRIGGER_WASM_BUILD_ENV: &str = "TRIGGER_WASM_BUILD"; +const FORCE_WASM_BUILD_ENV: &str = "FORCE_WASM_BUILD"; /// Replace all backslashes with slashes. fn replace_back_slashes(path: T) -> String { @@ -476,6 +476,6 @@ fn generate_rerun_if_changed_instructions() { // Make sure that the `build.rs` is called again if one of the following env variables changes. println!("cargo:rerun-if-env-changed={}", SKIP_BUILD_ENV); println!("cargo:rerun-if-env-changed={}", DUMMY_WASM_BINARY_ENV); - println!("cargo:rerun-if-env-changed={}", TRIGGER_WASM_BUILD_ENV); + println!("cargo:rerun-if-env-changed={}", FORCE_WASM_BUILD_ENV); println!("cargo:rerun-if-env-changed={}", generate_crate_skip_build_env_name()); } diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml index 5e90625620508267e3928c999b0079f03d9a202a..de0f11e84670db2bde74639b8eab4433d424735b 100644 --- a/utils/wasm-builder/Cargo.toml +++ b/utils/wasm-builder/Cargo.toml @@ -22,3 +22,4 @@ fs2 = "0.4.3" wasm-gc-api = "0.1.11" atty = "0.2.13" itertools = "0.8.2" +ansi_term = "0.12.1" diff --git a/utils/wasm-builder/README.md b/utils/wasm-builder/README.md index b72e7e16d4ff4110597e4f44119d571842b79e3e..1e24d2cebab3220b68d281a33a495a01999f0130 100644 --- a/utils/wasm-builder/README.md +++ b/utils/wasm-builder/README.md @@ -51,9 +51,9 @@ By using environment variables, you can configure which Wasm binaries are built for `cargo check` runs. - `WASM_BUILD_TYPE` - Sets the build type for building wasm binaries. Supported values are `release` or `debug`. By default the build type is equal to the build type used by the main build. -- `TRIGGER_WASM_BUILD` - Can be set to trigger a wasm build. On subsequent calls the value of the variable - needs to change. As wasm builder instructs `cargo` to watch for file changes - this environment variable should only be required in certain circumstances. +- `FORCE_WASM_BUILD` - Can be set to force a wasm build. On subsequent calls the value of the variable + needs to change. As wasm builder instructs `cargo` to watch for file changes + this environment variable should only be required in certain circumstances. - `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm binary. - `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. - `WASM_TARGET_DIRECTORY` - Will copy any build wasm binary to the given directory. 
The path needs diff --git a/utils/wasm-builder/src/lib.rs b/utils/wasm-builder/src/lib.rs index ab64db56fec3427466ca11407991f29628b7a555..bb50729f71ce572187bf91b52937d71df7f1191d 100644 --- a/utils/wasm-builder/src/lib.rs +++ b/utils/wasm-builder/src/lib.rs @@ -68,9 +68,9 @@ //! for `cargo check` runs. //! - `WASM_BUILD_TYPE` - Sets the build type for building wasm binaries. Supported values are `release` or `debug`. //! By default the build type is equal to the build type used by the main build. -//! - `TRIGGER_WASM_BUILD` - Can be set to trigger a wasm build. On subsequent calls the value of the variable -//! needs to change. As wasm builder instructs `cargo` to watch for file changes -//! this environment variable should only be required in certain circumstances. +//! - `FORCE_WASM_BUILD` - Can be set to force a wasm build. On subsequent calls the value of the variable +//! needs to change. As wasm builder instructs `cargo` to watch for file changes +//! this environment variable should only be required in certain circumstances. //! - `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm binary. //! - `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. //! - `WASM_TARGET_DIRECTORY` - Will copy any build wasm binary to the given directory. The path needs @@ -168,17 +168,29 @@ pub fn build_project_with_default_rustflags( default_rustflags, ); + let (wasm_binary, wasm_binary_bloaty) = if let Some(wasm_binary) = wasm_binary { + ( + wasm_binary.wasm_binary_path_escaped(), + bloaty.wasm_binary_bloaty_path_escaped(), + ) + } else { + ( + bloaty.wasm_binary_bloaty_path_escaped(), + bloaty.wasm_binary_bloaty_path_escaped(), + ) + }; + write_file_if_changed( - file_name.into(), - format!( - r#" - pub const WASM_BINARY: Option<&[u8]> = Some(include_bytes!("{wasm_binary}")); - pub const WASM_BINARY_BLOATY: Option<&[u8]> = Some(include_bytes!("{wasm_binary_bloaty}")); - "#, - wasm_binary = wasm_binary.wasm_binary_path_escaped(), - wasm_binary_bloaty = bloaty.wasm_binary_bloaty_path_escaped(), - ), - ); + file_name.into(), + format!( + r#" + pub const WASM_BINARY: Option<&[u8]> = Some(include_bytes!("{wasm_binary}")); + pub const WASM_BINARY_BLOATY: Option<&[u8]> = Some(include_bytes!("{wasm_binary_bloaty}")); + "#, + wasm_binary = wasm_binary, + wasm_binary_bloaty = wasm_binary_bloaty, + ), + ); } /// Checks if the build of the WASM binary should be skipped. @@ -297,3 +309,8 @@ impl CargoCommand { .contains("-nightly") } } + +/// Returns `true` when color output is enabled. +fn color_output_enabled() -> bool { + env::var(crate::WASM_BUILD_NO_COLOR).is_err() +} diff --git a/utils/wasm-builder/src/prerequisites.rs b/utils/wasm-builder/src/prerequisites.rs index d7c15095762e8a17ec8effab1e6da2e7ecc2572d..2a9801744c45bb285d545f4c579342f69c8eff6d 100644 --- a/utils/wasm-builder/src/prerequisites.rs +++ b/utils/wasm-builder/src/prerequisites.rs @@ -18,14 +18,24 @@ use std::fs; use tempfile::tempdir; +use ansi_term::Color; + +/// Print an error message. +fn print_error_message(message: &str) -> String { + if super::color_output_enabled() { + Color::Red.bold().paint(message).to_string() + } else { + message.into() + } +} /// Checks that all prerequisites are installed. /// /// # Returns /// Returns `None` if everything was found and `Some(ERR_MSG)` if something could not be found. 
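The new `ansi_term` dependency is only exercised when `color_output_enabled()` reports that the user has not set `WASM_BUILD_NO_COLOR`. A small self-contained sketch of that pattern follows, reusing the same environment variable and the same `Color::Red` error styling as `prerequisites.rs`.

```rust
use std::env;

use ansi_term::Color;

/// Mirrors wasm-builder's `color_output_enabled`: color is on unless the
/// user sets `WASM_BUILD_NO_COLOR`.
fn color_output_enabled() -> bool {
	env::var("WASM_BUILD_NO_COLOR").is_err()
}

/// Mirrors `print_error_message` from `prerequisites.rs`.
fn print_error_message(message: &str) -> String {
	if color_output_enabled() {
		Color::Red.bold().paint(message).to_string()
	} else {
		message.into()
	}
}

fn main() {
	eprintln!("{}", print_error_message("Rust WASM toolchain not installed, please install it!"));
}
```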
-pub fn check() -> Option<&'static str> { +pub fn check() -> Option { if !check_nightly_installed(){ - return Some("Rust nightly not installed, please install it!") + return Some(print_error_message("Rust nightly not installed, please install it!")) } check_wasm_toolchain_installed() @@ -35,7 +45,7 @@ fn check_nightly_installed() -> bool { crate::get_nightly_cargo().is_nightly() } -fn check_wasm_toolchain_installed() -> Option<&'static str> { +fn check_wasm_toolchain_installed() -> Option { let temp = tempdir().expect("Creating temp dir does not fail; qed"); fs::create_dir_all(temp.path().join("src")).expect("Creating src dir does not fail; qed"); @@ -59,22 +69,39 @@ fn check_wasm_toolchain_installed() -> Option<&'static str> { fs::write(&test_file, "pub fn test() {}") .expect("Writing to the test file does not fail; qed"); - let err_msg = "Rust WASM toolchain not installed, please install it!"; + let err_msg = print_error_message("Rust WASM toolchain not installed, please install it!"); let manifest_path = manifest_path.display().to_string(); - crate::get_nightly_cargo() - .command() - .args(&["build", "--target=wasm32-unknown-unknown", "--manifest-path", &manifest_path]) + + let mut build_cmd = crate::get_nightly_cargo().command(); + + build_cmd.args(&["build", "--target=wasm32-unknown-unknown", "--manifest-path", &manifest_path]); + + if super::color_output_enabled() { + build_cmd.arg("--color=always"); + } + + build_cmd .output() - .map_err(|_| err_msg) + .map_err(|_| err_msg.clone()) .and_then(|s| if s.status.success() { Ok(()) } else { match String::from_utf8(s.stderr) { Ok(ref err) if err.contains("linker `rust-lld` not found") => { - Err("`rust-lld` not found, please install it!") + Err(print_error_message("`rust-lld` not found, please install it!")) }, - _ => Err(err_msg) + Ok(ref err) => Err( + format!( + "{}\n\n{}\n{}\n{}{}\n", + err_msg, + Color::Yellow.bold().paint("Further error information:"), + Color::Yellow.bold().paint("-".repeat(60)), + err, + Color::Yellow.bold().paint("-".repeat(60)), + ) + ), + Err(_) => Err(err_msg), } } ) diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index 6f8f47881b03fa83d01eda8ba54ad9d0eb1cc4fe..1d4a4484cf45e5026a3e9c6ad4f24a23d23c36e5 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -91,7 +91,7 @@ impl Drop for WorkspaceLock { pub fn create_and_compile( cargo_manifest: &Path, default_rustflags: &str, -) -> (WasmBinary, WasmBinaryBloaty) { +) -> (Option, WasmBinaryBloaty) { let wasm_workspace_root = get_wasm_workspace_root(); let wasm_workspace = wasm_workspace_root.join("wbuild"); @@ -113,7 +113,9 @@ pub fn create_and_compile( &wasm_workspace, ); - copy_wasm_to_target_directory(cargo_manifest, &wasm_binary); + wasm_binary.as_ref().map(|wasm_binary| + copy_wasm_to_target_directory(cargo_manifest, wasm_binary) + ); generate_rerun_if_changed_instructions(cargo_manifest, &project, &wasm_workspace); @@ -447,7 +449,7 @@ fn build_project(project: &Path, default_rustflags: &str) { // We don't want to call ourselves recursively .env(crate::SKIP_BUILD_ENV, ""); - if env::var(crate::WASM_BUILD_NO_COLOR).is_err() { + if super::color_output_enabled() { build_cmd.arg("--color=always"); } @@ -469,18 +471,23 @@ fn compact_wasm_file( project: &Path, cargo_manifest: &Path, wasm_workspace: &Path, -) -> (WasmBinary, WasmBinaryBloaty) { - let target = if is_release_build() { "release" } else { "debug" }; +) -> (Option, WasmBinaryBloaty) { + let is_release_build = 
is_release_build();
+	let target = if is_release_build { "release" } else { "debug" };
 	let wasm_binary = get_wasm_binary_name(cargo_manifest);
 	let wasm_file = wasm_workspace.join("target/wasm32-unknown-unknown")
 		.join(target)
 		.join(format!("{}.wasm", wasm_binary));
-	let wasm_compact_file = project.join(format!("{}.compact.wasm", wasm_binary));
-
-	wasm_gc::garbage_collect_file(&wasm_file, &wasm_compact_file)
-		.expect("Failed to compact generated WASM binary.");
+	let wasm_compact_file = if is_release_build {
+		let wasm_compact_file = project.join(format!("{}.compact.wasm", wasm_binary));
+		wasm_gc::garbage_collect_file(&wasm_file, &wasm_compact_file)
+			.expect("Failed to compact generated WASM binary.");
+		Some(WasmBinary(wasm_compact_file))
+	} else {
+		None
+	};

-	(WasmBinary(wasm_compact_file), WasmBinaryBloaty(wasm_file))
+	(wasm_compact_file, WasmBinaryBloaty(wasm_file))
 }

 /// Custom wrapper for a [`cargo_metadata::Package`] to store it in