diff --git a/.github/allowed-actions.js b/.github/allowed-actions.js new file mode 100644 index 0000000000000000000000000000000000000000..4fb894758060d4a3dbe57973fbb52bb8c326e786 --- /dev/null +++ b/.github/allowed-actions.js @@ -0,0 +1,7 @@ +// This is a whitelist of GitHub Actions that are approved for use in this project. +// If a new or existing workflow file is updated to use an action or action version +// not listed here, CI will fail. + +module.exports = [ + 'gaurav-nelson/github-action-markdown-link-check@7481451f70251762f149d69596e3e276ebf2b236', // gaurav-nelson/github-action-markdown-link-check@v1.0.8 +] diff --git a/.github/workflows/md-link-check.yml b/.github/workflows/md-link-check.yml index 75948534b3c194430e87e4a2fc6eb8c600c4618e..868569911d47185c08aff6d419ad56240a0dbc59 100644 --- a/.github/workflows/md-link-check.yml +++ b/.github/workflows/md-link-check.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - - uses: gaurav-nelson/github-action-markdown-link-check@v1 + - uses: gaurav-nelson/github-action-markdown-link-check@7481451f70251762f149d69596e3e276ebf2b236 with: use-quiet-mode: 'yes' config-file: '.github/workflows/mlc_config.json' diff --git a/.github/workflows/mlc_config.json b/.github/workflows/mlc_config.json index f741e987b1b2a771d4b15a478a519ca8e868b60a..e7e620b39e0a9b2dd60eb57498ba99c1b6635443 100644 --- a/.github/workflows/mlc_config.json +++ b/.github/workflows/mlc_config.json @@ -1,7 +1,7 @@ { "ignorePatterns": [ { - "pattern": "^https://crates.io" + "pattern": "^https://crates.io", } ] } diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index cbb56fcf7267936be9a9de2b433f8efe66745a05..07b0dd319cf792dbee99c64b5addc4307ab14dd5 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -24,7 +24,6 @@ stages: - check - test - build - - post-build-test - chaos-env - chaos - publish @@ -197,7 +196,7 @@ cargo-check-benches: <<: *docker-env <<: *test-refs script: - - BUILD_DUMMY_WASM_BINARY=1 time cargo +nightly check --benches --all + - SKIP_WASM_BUILD=1 time cargo +nightly check --benches --all - cargo run --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small - cargo run --release -p node-bench -- ::trie::read::small - sccache -s @@ -208,7 +207,7 @@ cargo-check-subkey: <<: *test-refs script: - cd ./bin/utils/subkey - - BUILD_DUMMY_WASM_BINARY=1 time cargo check --release + - SKIP_WASM_BUILD=1 time cargo check --release - sccache -s test-deterministic-wasm: @@ -222,7 +221,7 @@ test-deterministic-wasm: # build runtime - cargo build --verbose --release -p node-runtime # make checksum - - sha256sum target/release/wbuild/target/wasm32-unknown-unknown/release/node_runtime.wasm > checksum.sha256 + - sha256sum target/release/wbuild/node-runtime/target/wasm32-unknown-unknown/release/node_runtime.wasm > checksum.sha256 # clean up – FIXME: can we reuse some of the artifacts? - cargo clean # build again @@ -343,7 +342,7 @@ cargo-check-macos: <<: *docker-env <<: *test-refs script: - - BUILD_DUMMY_WASM_BINARY=1 time cargo check --release + - SKIP_WASM_BUILD=1 time cargo check --release - sccache -s tags: - osx @@ -451,7 +450,7 @@ build-linux-subkey: &build-subkey - mkdir -p ./artifacts/subkey script: - cd ./bin/utils/subkey - - BUILD_DUMMY_WASM_BINARY=1 time cargo build --release --verbose + - SKIP_WASM_BUILD=1 time cargo build --release --verbose - cd - - mv ./target/release/subkey ./artifacts/subkey/. 
- echo -n "Subkey version = " @@ -471,7 +470,9 @@ build-rust-doc: stage: build <<: *docker-env <<: *test-refs - allow_failure: true + needs: + - job: test-linux-stable + artifacts: false variables: <<: *default-vars RUSTFLAGS: -Dwarnings @@ -483,30 +484,12 @@ build-rust-doc: - ./crate-docs/ script: - rm -f ./crate-docs/index.html # use it as an indicator if the job succeeds - - BUILD_DUMMY_WASM_BINARY=1 RUSTDOCFLAGS="--html-in-header $(pwd)/.maintain/rustdoc-header.html" + - SKIP_WASM_BUILD=1 RUSTDOCFLAGS="--html-in-header $(pwd)/.maintain/rustdoc-header.html" time cargo +nightly doc --no-deps --workspace --all-features --verbose - mv ./target/doc ./crate-docs - echo "" > ./crate-docs/index.html - sccache -s -#### stage: post-build-test - -trigger-contracts-ci: - stage: post-build-test - needs: - - job: build-linux-substrate - artifacts: false - - job: test-linux-stable - artifacts: false - trigger: - project: parity/srml-contracts-waterfall - branch: master - strategy: depend - rules: - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_PIPELINE_SOURCE == "schedule" - - if: $CI_COMMIT_REF_NAME == "master" - #### stage: chaos-env build-chaos-docker: @@ -677,6 +660,8 @@ publish-s3-doc: needs: - job: build-rust-doc artifacts: true + - job: build-linux-substrate + artifacts: false <<: *build-refs <<: *kubernetes-build variables: diff --git a/.maintain/frame-weight-template.hbs b/.maintain/frame-weight-template.hbs index aac37f0833c7d96f0a7da6939726e95fefcb1cb1..76f89eafbaeee28a638d8407a8058a4e214bfbdc 100644 --- a/.maintain/frame-weight-template.hbs +++ b/.maintain/frame-weight-template.hbs @@ -45,7 +45,7 @@ pub trait WeightInfo { /// Weights for {{pallet}} using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { {{~#each benchmarks as |benchmark|}} fn {{benchmark.name~}} ( @@ -54,6 +54,7 @@ impl WeightInfo for SubstrateWeight { ) -> Weight { ({{underscore benchmark.base_weight}} as Weight) {{~#each benchmark.component_weight as |cw|}} + // Standard Error: {{underscore cw.error}} .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) {{~/each}} {{~#if (ne benchmark.base_reads "0")}} @@ -82,6 +83,7 @@ impl WeightInfo for () { ) -> Weight { ({{underscore benchmark.base_weight}} as Weight) {{~#each benchmark.component_weight as |cw|}} + // Standard Error: {{underscore cw.error}} .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) {{~/each}} {{~#if (ne benchmark.base_reads "0")}} diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index 73a5a36ff8af9e59cc91b470cd4d46595b776ee2..4a7e9869abf5e1cc238011a81ca584c4a567a8f7 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -9,6 +9,7 @@ # polkadot companion: paritytech/polkadot#567 # +set -e github_api_substrate_pull_url="https://api.github.com/repos/paritytech/substrate/pulls" # use github api v3 in order to access the data without authentication @@ -44,6 +45,7 @@ cargo install -f --version 0.2.0 diener # Merge master into our branch before building Polkadot to make sure we don't miss # any commits that are required by Polkadot. +git fetch --depth 20 origin git merge origin/master # Clone the current Polkadot master branch into ./polkadot. @@ -92,3 +94,6 @@ cd polkadot # Test Polkadot pr or master branch with this Substrate commit. 
cargo update -p sp-io time cargo test --all --release --verbose --features=real-overseer + +cd parachain/test-parachains/adder/collator/ +time cargo test --release --verbose --locked --features=real-overseer diff --git a/.maintain/monitoring/grafana-dashboards/substrate-dashboard.json b/.maintain/monitoring/grafana-dashboards/substrate-dashboard.json index 629b22617b22a970eb1f439179f62711d1bac9b4..a61e8a49bade752ea321ffe62528f24378442834 100644 --- a/.maintain/monitoring/grafana-dashboards/substrate-dashboard.json +++ b/.maintain/monitoring/grafana-dashboards/substrate-dashboard.json @@ -756,108 +756,6 @@ "alignLevel": null } }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 12 - }, - "hiddenSeries": false, - "id": 23, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_sync_extra_finality_proofs_active{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} active", - "refId": "A" - }, - { - "expr": "[[metric_namespace]]_sync_extra_finality_proofs_failed{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} failed", - "refId": "B" - }, - { - "expr": "[[metric_namespace]]_sync_extra_finality_proofs_importing{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} importing", - "refId": "C" - }, - { - "expr": "[[metric_namespace]]_sync_extra_finality_proofs_pending{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} pending", - "refId": "D" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Sync Proof", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, { "aliasColors": {}, "bars": false, diff --git a/Cargo.lock b/Cargo.lock index 3b905e4bd87ebfdd85da52bb5b28f97c5146a7c9..c775fbb062e16c39163db9edee67b8b156905b8e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -79,15 +79,6 @@ dependencies = [ "opaque-debug 0.3.0", ] -[[package]] -name = "ahash" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29661b60bec623f0586702976ff4d0c9942dcb6723161c2df0eea78455cfedfb" -dependencies = [ - "const-random", -] - [[package]] name = "ahash" version = "0.3.8" @@ -109,17 +100,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "alga" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f823d037a7ec6ea2197046bafd4ae150e6bc36f9ca347404f46a46823fa84f2" -dependencies = [ - "approx", - "num-complex", - "num-traits", -] - [[package]] name = "ansi_term" version = "0.11.0" @@ 
-264,9 +244,9 @@ dependencies = [ [[package]] name = "async-io" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40a0b2bb8ae20fede194e779150fe283f65a4a08461b496de546ec366b174ad9" +checksum = "9315f8f07556761c3e48fec2e6b276004acf426e6dc068b2c2251854d65ee0fd" dependencies = [ "concurrent-queue", "fastrand", @@ -312,7 +292,7 @@ dependencies = [ "memchr", "num_cpus", "once_cell", - "pin-project-lite", + "pin-project-lite 0.1.11", "pin-utils", "slab", "wasm-bindgen-futures", @@ -326,13 +306,13 @@ checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] name = "async-tls" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d85a97c4a0ecce878efd3f945f119c78a646d8975340bca0398f9bb05c30cc52" +checksum = "2f23d769dbf1838d5df5156e7b1ad404f4c463d1ac2c6aeb6cd943630f8a8400" dependencies = [ "futures-core", "futures-io", - "rustls", + "rustls 0.19.0", "webpki", "webpki-roots", ] @@ -439,7 +419,7 @@ dependencies = [ "cfg-if 0.1.10", "clang-sys", "clap", - "env_logger", + "env_logger 0.7.1", "lazy_static", "lazycell", "log", @@ -500,17 +480,6 @@ dependencies = [ "constant_time_eq", ] -[[package]] -name = "blake2s_simd" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - [[package]] name = "block-buffer" version = "0.7.3" @@ -571,12 +540,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "bs58" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" - [[package]] name = "bs58" version = "0.4.0" @@ -653,13 +616,12 @@ checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" [[package]] name = "cargo_metadata" -version = "0.10.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8de60b887edf6d74370fc8eb177040da4847d971d6234c7b13a6da324ef0caf" +checksum = "d5a5f7b42f606b7f23674f6f4d877628350682bc40687d3fae65679a58d55345" dependencies = [ - "semver 0.9.0", + "semver 0.11.0", "serde", - "serde_derive", "serde_json", ] @@ -754,6 +716,15 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "ckb-merkle-mountain-range" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e486fe53bb9f2ca0f58cb60e8679a5354fd6687a839942ef0a75967250289ca6" +dependencies = [ + "cfg-if 0.1.10", +] + [[package]] name = "clang-sys" version = "0.29.3" @@ -819,34 +790,14 @@ dependencies = [ [[package]] name = "console_log" -version = "0.1.2" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e7871d2947441b0fdd8e2bd1ce2a2f75304f896582c0d572162d48290683c48" +checksum = "501a375961cef1a0d44767200e66e4a559283097e91d0730b1d75dfb2f8a1494" dependencies = [ "log", "web-sys", ] -[[package]] -name = "const-random" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02dc82c12dc2ee6e1ded861cf7d582b46f66f796d1b6c93fa28b911ead95da02" -dependencies = [ - "const-random-macro", - "proc-macro-hack", -] - -[[package]] -name = "const-random-macro" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fc757bbb9544aa296c2ae00c679e81f886b37e28e59097defe0cf524306f6685" -dependencies = [ - "getrandom 0.2.0", - "proc-macro-hack", -] - [[package]] name = "const_fn" version = "0.4.3" @@ -1268,6 +1219,15 @@ dependencies = [ "dirs-sys", ] +[[package]] +name = "directories" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8fed639d60b58d0f53498ab13d26f621fd77569cc6edb031f4cc36a2ad9da0f" +dependencies = [ + "dirs-sys", +] + [[package]] name = "dirs-sys" version = "0.3.5" @@ -1377,6 +1337,19 @@ dependencies = [ "syn", ] +[[package]] +name = "env_logger" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15b0a4d2e39f8420210be8b27eeda28029729e2fd4291019455016c348240c38" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + [[package]] name = "env_logger" version = "0.7.1" @@ -1426,106 +1399,12 @@ dependencies = [ "libc", ] -[[package]] -name = "ethbloom" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71a6567e6fd35589fea0c63b94b4cf2e55573e413901bdbe60ab15cf0e25e5df" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "impl-serde", - "tiny-keccak", -] - -[[package]] -name = "ethereum" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df706418ff7d3874b9506424b04ea0bef569a2b39412b43a27ea86e679be108e" -dependencies = [ - "ethereum-types", - "hash-db", - "hash256-std-hasher", - "parity-scale-codec", - "rlp", - "rlp-derive", - "serde", - "sha3 0.9.1", - "triehash", -] - -[[package]] -name = "ethereum-types" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "473aecff686bd8e7b9db0165cbbb53562376b39bf35b427f0c60446a9e1634b0" -dependencies = [ - "ethbloom", - "fixed-hash", - "impl-rlp", - "impl-serde", - "primitive-types", - "uint", -] - [[package]] name = "event-listener" version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" -[[package]] -name = "evm" -version = "0.17.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16c8deca0ec3efa361b03d9cae6fe94321a1d2d0a523437edd720b3d140e3c08" -dependencies = [ - "ethereum", - "evm-core", - "evm-gasometer", - "evm-runtime", - "log", - "parity-scale-codec", - "primitive-types", - "rlp", - "serde", - "sha3 0.8.2", -] - -[[package]] -name = "evm-core" -version = "0.17.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2d732b3c36df36833761cf67df8f65866be1d368d20508bc3e13e6f256c8c5" -dependencies = [ - "log", - "primitive-types", -] - -[[package]] -name = "evm-gasometer" -version = "0.17.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46de1b91ccd744627484183729f1b5af484b3bf15505007fc28cc54264cb9ea1" -dependencies = [ - "evm-core", - "evm-runtime", - "primitive-types", -] - -[[package]] -name = "evm-runtime" -version = "0.17.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2c1d1ffe96f833788512c890d702457d790dba4917ac6f64f8f60fbd9bc40b8" -dependencies = [ - "evm-core", - "primitive-types", - "sha3 0.8.2", -] - [[package]] name = "exit-future" version = "0.2.0" @@ -1593,7 +1472,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fdbe0d94371f9ce939b555dd342d0686cc4c0cadbcd4b61d70af5ff97eb4126" dependencies = [ - 
"env_logger", + "env_logger 0.7.1", "log", ] @@ -1689,6 +1568,7 @@ dependencies = [ name = "frame-benchmarking-cli" version = "2.0.0" dependencies = [ + "Inflector", "chrono", "frame-benchmarking", "handlebars", @@ -1801,6 +1681,7 @@ version = "2.0.0" dependencies = [ "frame-metadata", "frame-support", + "frame-system", "parity-scale-codec", "pretty_assertions", "rustversion", @@ -1867,16 +1748,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "fs2" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" -dependencies = [ - "libc", - "winapi 0.3.9", -] - [[package]] name = "fs_extra" version = "1.2.0" @@ -1997,7 +1868,7 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite", + "pin-project-lite 0.1.11", "waker-fn", ] @@ -2105,6 +1976,15 @@ dependencies = [ "typenum", ] +[[package]] +name = "generic-array" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ed1e761351b56f54eb9dcd0cfaca9fd0daecf93918e1cfc01c8a3d26ee7adcd" +dependencies = [ + "typenum", +] + [[package]] name = "generic-array" version = "0.14.4" @@ -2127,17 +2007,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "getrandom" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - [[package]] name = "ghash" version = "0.3.0" @@ -2269,16 +2138,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "hashbrown" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6073d0ca812575946eb5f35ff68dbe519907b25c42530389ff946dc84c6ead" -dependencies = [ - "ahash 0.2.19", - "autocfg 0.1.7", -] - [[package]] name = "hashbrown" version = "0.8.2" @@ -2506,7 +2365,7 @@ dependencies = [ "futures-util", "hyper 0.13.9", "log", - "rustls", + "rustls 0.18.1", "rustls-native-certs", "tokio 0.2.23", "tokio-rustls", @@ -2557,21 +2416,28 @@ dependencies = [ ] [[package]] -name = "impl-codec" -version = "0.4.2" +name = "if-watch" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1be51a921b067b0eaca2fad532d9400041561aa922221cc65f95a85641c6bf53" +checksum = "16d7c5e361e6b05c882b4847dd98992534cebc6fcde7f4bc98225bcf10fd6d0d" dependencies = [ - "parity-scale-codec", + "async-io", + "futures 0.3.8", + "futures-lite", + "if-addrs", + "ipnet", + "libc", + "log", + "winapi 0.3.9", ] [[package]] -name = "impl-rlp" -version = "0.2.1" +name = "impl-codec" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f7a72f11830b52333f36e3b09a288333888bf54380fd0ac0790a3c31ab0f3c5" +checksum = "1be51a921b067b0eaca2fad532d9400041561aa922221cc65f95a85641c6bf53" dependencies = [ - "rlp", + "parity-scale-codec", ] [[package]] @@ -2689,9 +2555,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.44" +version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85a7e2c92a4804dd459b86c339278d0fe87cf93757fae222c3fa3ae75458bc73" +checksum = "ca059e81d9486668f12d455a4ea6daa600bd408134cd17e3d3fb5a32d1f016f8" dependencies = [ "wasm-bindgen", ] @@ -2931,9 +2797,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.80" +version = "0.2.81" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614" +checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" [[package]] name = "libloading" @@ -2953,9 +2819,9 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.30.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c2b4c99f8798be90746fc226acf95d3e6cff0655883634cc30dab1f64f438b" +checksum = "022cdac4ab124be12de581e591796d4dfb7d1f1eef94669d2c1eaa0e98dd2f0e" dependencies = [ "atomic", "bytes 0.5.6", @@ -2982,7 +2848,6 @@ dependencies = [ "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", - "multihash", "parity-multiaddr", "parking_lot 0.11.1", "pin-project 1.0.2", @@ -2992,12 +2857,13 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.24.0" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8186060d6bd415e4e928e6cb44c4fe7e7a7dd53437bd936ce7e5f421e45a51" +checksum = "cc9c96d3a606a696a3a6c0ad3c3352c57bda2082ec9090930f1bd9daf787039f" dependencies = [ "asn1_der", - "bs58 0.4.0", + "bs58", + "bytes 0.5.6", "ed25519-dalek", "either", "fnv", @@ -3019,16 +2885,16 @@ dependencies = [ "sha2 0.9.2", "smallvec 1.5.0", "thiserror", - "unsigned-varint 0.5.1", + "unsigned-varint", "void", "zeroize", ] [[package]] name = "libp2p-core-derive" -version = "0.20.2" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f753d9324cd3ec14bf04b8a8cd0d269c87f294153d6bf2a84497a63a5ad22213" +checksum = "f4bc40943156e42138d22ed3c57ff0e1a147237742715937622a99b10fbe0156" dependencies = [ "quote", "syn", @@ -3036,9 +2902,9 @@ dependencies = [ [[package]] name = "libp2p-deflate" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34aea69349e70a58ef9ecd21ac12c5eaa36255ac6986828079d26393f9e618cb" +checksum = "5a579d7dd506d0620ba88ccc1754436b7de35ed6c884234f9a226bbfce382640" dependencies = [ "flate2", "futures 0.3.8", @@ -3047,9 +2913,9 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0baeff71fb5cb1fe1604f74a712a44b66a8c5900f4022411a1d550f09d6bb776" +checksum = "15dea5933f570844d7b5222b12b58f7bd52e9ca38cd65a1bd4f35341f053f012" dependencies = [ "futures 0.3.8", "libp2p-core", @@ -3058,9 +2924,9 @@ dependencies = [ [[package]] name = "libp2p-floodsub" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db0f925a45f310b678e70faf71a10023b829d02eb9cc2628a63de928936f3ade" +checksum = "23070a0838bd9a8adb27e6eba477eeb650c498f9d139383dd0135d20a8170253" dependencies = [ "cuckoofilter", "fnv", @@ -3076,9 +2942,9 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efeb65567174974f551a91f9f5719445b6695cad56f6a7a47a27111f37efb6b8" +checksum = "65e8f3aa0906fbad435dac23c177eef3cdfaaf62609791bd7f54f8553edcfdf9" dependencies = [ "base64 0.13.0", "byteorder", @@ -3096,15 +2962,15 @@ dependencies = [ "rand 0.7.3", "sha2 0.9.2", "smallvec 1.5.0", - "unsigned-varint 0.5.1", + "unsigned-varint", "wasm-timer", ] [[package]] name = "libp2p-identify" -version = "0.24.0" +version = "0.25.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e074124669840484de564901d47f2d0892e73f6d8ee7c37e9c2644af1b217bf4" +checksum = "802fb973a7e0dde3fb9a2113a62bad90338ebe01983b706e1d576d0c2af93cda" dependencies = [ "futures 0.3.8", "libp2p-core", @@ -3118,9 +2984,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78a2653b2e3254a3bbeb66bfc3f0dca7d6cba6aa2a96791db114003dec1b5394" +checksum = "6506b7b7982f7626fc96a91bc61be4b1fe7ae9ac23824f0ecefcce21cb39238c" dependencies = [ "arrayvec 0.5.2", "bytes 0.5.6", @@ -3131,45 +2997,43 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "multihash", "prost", "prost-build", "rand 0.7.3", "sha2 0.9.2", "smallvec 1.5.0", "uint", - "unsigned-varint 0.5.1", + "unsigned-varint", "void", "wasm-timer", ] [[package]] name = "libp2p-mdns" -version = "0.24.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "786b068098794322239f8f04df88a52daeb7863b2e77501c4d85d32e0a8f2d26" +checksum = "7b934ee03a361f317df7d75defa4177b285534c58f49d5e6e240278e13ef3f65" dependencies = [ - "async-std", + "async-io", "data-encoding", "dns-parser", - "either", "futures 0.3.8", + "if-watch", "lazy_static", "libp2p-core", "libp2p-swarm", "log", - "net2", "rand 0.7.3", "smallvec 1.5.0", + "socket2", "void", - "wasm-timer", ] [[package]] name = "libp2p-mplex" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed764eab613a8fb6b7dcf6c796f55a06fef2270e528329903e25cd3311b99663" +checksum = "ae2132b14045009b0f8e577a06e1459592ef0a89dedc58f3d4baf4eac956837b" dependencies = [ "bytes 0.5.6", "futures 0.3.8", @@ -3180,14 +3044,14 @@ dependencies = [ "parking_lot 0.11.1", "rand 0.7.3", "smallvec 1.5.0", - "unsigned-varint 0.5.1", + "unsigned-varint", ] [[package]] name = "libp2p-noise" -version = "0.26.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb441fb015ec16690099c5d910fcba271d357763b3dcb784db7b27bbb0b68372" +checksum = "b9610a524bef4db383cd96b4ec3ec4722eafa72c7242fa89990b74166760583d" dependencies = [ "bytes 0.5.6", "curve25519-dalek 3.0.0", @@ -3207,9 +3071,9 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e5c50936cfdbe96a514e8992f304fa44cd3a681b6f779505f1ae62b3474705" +checksum = "659adf89356e04f65398bb74ee791b269e63da9e41b37f8dc19eaacd12487bfe" dependencies = [ "futures 0.3.8", "libp2p-core", @@ -3222,9 +3086,9 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.24.1" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21026557c335d3639591f247b19b7536195772034ec7e9c463137227f95eaaa1" +checksum = "96dfe26270c91d4ff095030d1fcadd602f3fd84968ebd592829916d0715798a6" dependencies = [ "bytes 0.5.6", "futures 0.3.8", @@ -3233,7 +3097,7 @@ dependencies = [ "log", "prost", "prost-build", - "unsigned-varint 0.5.1", + "unsigned-varint", "void", ] @@ -3248,14 +3112,14 @@ dependencies = [ "pin-project 0.4.27", "rand 0.7.3", "salsa20", - "sha3 0.9.1", + "sha3", ] [[package]] name = "libp2p-request-response" -version = "0.5.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dd9a1e0e6563dec1c9e702f7e68bdaa43da62a84536aa06372d3fed3e25d4ca" +checksum = 
"bd96c3580fe59a9379ac7906c2f61c7f5ad3b7515362af0e72153a7cc9a45550" dependencies = [ "async-trait", "bytes 0.5.6", @@ -3263,19 +3127,19 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "lru 0.6.1", + "lru", "minicbor", "rand 0.7.3", "smallvec 1.5.0", - "unsigned-varint 0.5.1", + "unsigned-varint", "wasm-timer", ] [[package]] name = "libp2p-swarm" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "565f0e06674b4033c978471e4083d5aaa8e03cef0719a0ec0905aaeaad39a919" +checksum = "de333c483f27d02ecf7b6cef814a36f5e1876f15139eefb00225c405350e1c22" dependencies = [ "either", "futures 0.3.8", @@ -3289,9 +3153,9 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.24.0" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33f3dce259c0d3127af5167f45c275b6c047320efdd0e40fde947482487af0a3" +checksum = "bc28c9ad6dc43f4c3950411cf808639d90307a076330e7996e5e94e70279bde0" dependencies = [ "async-std", "futures 0.3.8", @@ -3305,9 +3169,9 @@ dependencies = [ [[package]] name = "libp2p-uds" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e0aba04370a00d8d0236e350bc862926c1b42542a169aa6a481e660e5b990fe" +checksum = "9d821208d4b9af4b293a56dde470edd9f9fac8bb94a51f4f5327cc29a471b3f3" dependencies = [ "async-std", "futures 0.3.8", @@ -3317,9 +3181,9 @@ dependencies = [ [[package]] name = "libp2p-wasm-ext" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c703816f4170477a375b49c56d349e535ce68388f81ba1d9a3c8e2517effa82" +checksum = "1e6ef400b231ba78e866b860445480ca21ee447e03034138c6d57cf2969d6bf4" dependencies = [ "futures 0.3.8", "js-sys", @@ -3331,9 +3195,9 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.25.0" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d5e7268a959748040a0cf7456ad655be55b87f0ceda03bdb5b53674726b28f7" +checksum = "522a877ce42ededf1f5dd011dbc40ea116f1776818f09dacb3d7a206f3ad6305" dependencies = [ "async-tls", "either", @@ -3341,7 +3205,7 @@ dependencies = [ "libp2p-core", "log", "quicksink", - "rustls", + "rustls 0.19.0", "rw-stream-sink", "soketto", "url 2.2.0", @@ -3351,9 +3215,9 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a0798cbb58535162c40858493d09af06eac42a26e4966e58de0df701f559348" +checksum = "3be7ac000fa3e42ac09a6e658e48de34ac8ef9fff64a4e6e6b08dcc8f4b0e5f6" dependencies = [ "futures 0.3.8", "libp2p-core", @@ -3418,11 +3282,10 @@ dependencies = [ [[package]] name = "linregress" -version = "0.1.7" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9290cf6f928576eeb9c096c6fad9d8d452a0a1a70a2bbffa6e36064eedc0aac9" +checksum = "0d0ad4b5cc8385a881c561fac3501353d63d2a2b7a357b5064d71815c9a92724" dependencies = [ - "failure", "nalgebra", "statrs", ] @@ -3485,24 +3348,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "lru" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0609345ddee5badacf857d4f547e0e5a2e987db77085c24cd887f73573a04237" -dependencies = [ - "hashbrown 0.6.3", -] - -[[package]] -name = "lru" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"35c456c123957de3a220cd03786e0d86aa542a88b46029973b542f426da6ef34" -dependencies = [ - "hashbrown 0.6.3", -] - [[package]] name = "lru" version = "0.6.1" @@ -3731,17 +3576,29 @@ checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238" [[package]] name = "multihash" -version = "0.11.4" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567122ab6492f49b59def14ecc36e13e64dca4188196dd0cd41f9f3f979f3df6" +checksum = "fb63389ee5fcd4df3f8727600f4a0c3df53c541f0ed4e8b50a9ae51a80fc1efe" dependencies = [ - "blake2b_simd", - "blake2s_simd", "digest 0.9.0", - "sha-1 0.9.2", + "generic-array 0.14.4", + "multihash-derive", "sha2 0.9.2", - "sha3 0.9.1", - "unsigned-varint 0.5.1", + "unsigned-varint", +] + +[[package]] +name = "multihash-derive" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f5653449cd45d502a53480ee08d7a599e8f4893d2bacb33c63d65bc20af6c1a" +dependencies = [ + "proc-macro-crate", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", + "synstructure", ] [[package]] @@ -3752,32 +3609,33 @@ checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" [[package]] name = "multistream-select" -version = "0.8.5" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93faf2e41f9ee62fb01680ed48f3cc26652352327aa2e59869070358f6b7dd75" +checksum = "dda822043bba2d6da31c4e14041f9794f8fb130a5959289038d0b809d8888614" dependencies = [ "bytes 0.5.6", "futures 0.3.8", "log", "pin-project 1.0.2", "smallvec 1.5.0", - "unsigned-varint 0.5.1", + "unsigned-varint", ] [[package]] name = "nalgebra" -version = "0.18.1" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaa9fddbc34c8c35dd2108515587b8ce0cab396f17977b8c738568e4edb521a2" +checksum = "d6b6147c3d50b4f3cdabfe2ecc94a0191fd3d6ad58aefd9664cf396285883486" dependencies = [ - "alga", "approx", - "generic-array 0.12.3", + "generic-array 0.13.2", "matrixmultiply", "num-complex", "num-rational", "num-traits", - "rand 0.6.5", + "rand 0.7.3", + "rand_distr", + "simba", "typenum", ] @@ -4075,10 +3933,12 @@ dependencies = [ "frame-system-rpc-runtime-api", "hex-literal", "node-primitives", + "pallet-assets", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-balances", + "pallet-bounties", "pallet-collective", "pallet-contracts", "pallet-contracts-primitives", @@ -4090,6 +3950,7 @@ dependencies = [ "pallet-im-online", "pallet-indices", "pallet-membership", + "pallet-mmr", "pallet-multisig", "pallet-offences", "pallet-offences-benchmarking", @@ -4104,6 +3965,7 @@ dependencies = [ "pallet-staking-reward-curve", "pallet-sudo", "pallet-timestamp", + "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-treasury", @@ -4127,7 +3989,7 @@ dependencies = [ "sp-transaction-pool", "sp-version", "static_assertions", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] @@ -4146,6 +4008,7 @@ dependencies = [ "sc-consensus-aura", "sc-executor", "sc-finality-grandpa", + "sc-keystore", "sc-rpc", "sc-rpc-api", "sc-service", @@ -4198,7 +4061,7 @@ dependencies = [ "sp-std", "sp-transaction-pool", "sp-version", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] @@ -4408,8 +4271,10 @@ dependencies = [ name = "pallet-assets" version = "2.0.0" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", + "pallet-balances", 
"parity-scale-codec", "serde", "sp-core", @@ -4534,6 +4399,24 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-bounties" +version = "2.0.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-treasury", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-storage", +] + [[package]] name = "pallet-collective" version = "2.0.0" @@ -4686,28 +4569,6 @@ dependencies = [ "substrate-test-utils", ] -[[package]] -name = "pallet-evm" -version = "2.0.0" -dependencies = [ - "evm", - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "pallet-balances", - "pallet-timestamp", - "parity-scale-codec", - "primitive-types", - "ripemd160", - "rlp", - "serde", - "sha3 0.8.2", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - [[package]] name = "pallet-example" version = "2.0.0" @@ -4849,6 +4710,24 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-mmr" +version = "2.0.0" +dependencies = [ + "ckb-merkle-mountain-range", + "env_logger 0.5.13", + "frame-benchmarking", + "frame-support", + "frame-system", + "hex-literal", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-multisig" version = "2.0.0" @@ -5178,6 +5057,24 @@ dependencies = [ "sp-timestamp", ] +[[package]] +name = "pallet-tips" +version = "2.0.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-treasury", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-storage", +] + [[package]] name = "pallet-transaction-payment" version = "2.0.0" @@ -5233,6 +5130,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "impl-trait-for-tuples", "pallet-balances", "parity-scale-codec", "serde", @@ -5294,19 +5192,19 @@ dependencies = [ [[package]] name = "parity-multiaddr" -version = "0.9.6" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43244a26dc1ddd3097216bb12eaa6cf8a07b060c72718d9ebd60fd297d6401df" +checksum = "2f51a30667591b14f96068b2d12f1306d07a41ebd98239d194356d4d9707ac16" dependencies = [ "arrayref", - "bs58 0.4.0", + "bs58", "byteorder", "data-encoding", "multihash", "percent-encoding 2.1.0", "serde", "static_assertions", - "unsigned-varint 0.5.1", + "unsigned-varint", "url 2.2.0", ] @@ -5367,10 +5265,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "297ff91fa36aec49ce183484b102f6b75b46776822bd81525bfc4cc9b0dd0f5c" dependencies = [ "cfg-if 0.1.10", - "ethereum-types", "hashbrown 0.8.2", "impl-trait-for-tuples", - "lru 0.5.3", "parity-util-mem-derive", "parking_lot 0.10.2", "primitive-types", @@ -5671,6 +5567,12 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" +[[package]] +name = "pin-project-lite" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b063f57ec186e6140e2b8b6921e5f1bd89c7356dda5b33acc5401203ca6131c" + [[package]] name = "pin-utils" version = "0.1.0" @@ -5785,7 +5687,6 @@ checksum = "7dd39dcacf71411ba488570da7bbc89b717225e46478b30ba99b92db6b149809" dependencies = [ "fixed-hash", "impl-codec", - "impl-rlp", "impl-serde", "uint", ] @@ -5949,7 +5850,7 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f" dependencies = [ - "env_logger", + "env_logger 0.7.1", "log", "rand 0.7.3", "rand_core 0.5.1", @@ -5963,7 +5864,7 @@ checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" dependencies = [ "futures-core", "futures-sink", - "pin-project-lite", + "pin-project-lite 0.1.11", ] [[package]] @@ -6004,19 +5905,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "rand" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c618c47cd3ebd209790115ab837de41425723956ad3ce2e6a7f09890947cacb9" -dependencies = [ - "cloudabi 0.0.3", - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "winapi 0.3.9", -] - [[package]] name = "rand" version = "0.6.5" @@ -6042,7 +5930,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.15", + "getrandom", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", @@ -6091,7 +5979,16 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.15", + "getrandom", +] + +[[package]] +name = "rand_distr" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2" +dependencies = [ + "rand 0.7.3", ] [[package]] @@ -6238,7 +6135,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom 0.1.15", + "getrandom", "redox_syscall", "rust-argon2", ] @@ -6344,37 +6241,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "ripemd160" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "rlp" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1190dcc8c3a512f1eef5d09bb8c84c7f39e1054e174d1795482e18f5272f2e73" -dependencies = [ - "rustc-hex", -] - -[[package]] -name = "rlp-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "rocksdb" version = "0.15.0" @@ -6387,9 +6253,9 @@ dependencies = [ [[package]] name = "rpassword" -version = "4.0.5" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99371657d3c8e4d816fb6221db98fa408242b0b53bac08f8676a41f8554fe99f" +checksum = "d755237fc0f99d98641540e66abac8bc46a0652f19148ac9e21de2da06b326c9" dependencies = [ "libc", "winapi 0.3.9", @@ -6447,6 +6313,19 @@ dependencies = [ "webpki", ] +[[package]] +name = "rustls" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" +dependencies = [ + "base64 0.13.0", + "log", + "ring", + "sct", + "webpki", +] + [[package]] name = "rustls-native-certs" version = "0.4.0" @@ -6454,7 +6333,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "629d439a7672da82dd955498445e496ee2096fe2117b9f796558a43fdb9e59b8" dependencies = [ "openssl-probe", - "rustls", + "rustls 0.18.1", "schannel", "security-framework", ] @@ -6514,7 +6393,6 @@ name = "sc-authority-discovery" version = "0.8.0" dependencies = [ "async-trait", - "bytes 0.5.6", "derive_more", "either", "futures 0.3.8", @@ -6527,7 +6405,6 @@ dependencies = [ "quickcheck", "rand 0.7.3", "sc-client-api", - "sc-keystore", "sc-network", "sc-peerset", "serde_json", @@ -6625,7 +6502,6 @@ dependencies = [ "fdlimit", "futures 0.3.8", "hex", - "lazy_static", "libp2p", "log", "names", @@ -6678,7 +6554,6 @@ dependencies = [ "fnv", "futures 0.3.8", "hash-db", - "hex-literal", "kvdb", "kvdb-memorydb", "lazy_static", @@ -6686,7 +6561,6 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.10.2", "sc-executor", - "sc-telemetry", "sp-api", "sp-blockchain", "sp-consensus", @@ -6694,7 +6568,6 @@ dependencies = [ "sp-database", "sp-externalities", "sp-inherents", - "sp-keyring", "sp-keystore", "sp-runtime", "sp-state-machine", @@ -6707,6 +6580,7 @@ dependencies = [ "sp-version", "substrate-prometheus-endpoint", "substrate-test-runtime", + "thiserror", ] [[package]] @@ -6970,6 +6844,7 @@ dependencies = [ "sp-state-machine", "sp-trie", "substrate-test-runtime-client", + "thiserror", ] [[package]] @@ -7030,14 +6905,13 @@ name = "sc-executor-common" version = "0.8.0" dependencies = [ "derive_more", - "log", "parity-scale-codec", "parity-wasm 0.41.0", "sp-allocator", "sp-core", - "sp-runtime-interface", "sp-serializer", "sp-wasm-interface", + "thiserror", "wasmi", ] @@ -7211,7 +7085,7 @@ dependencies = [ "async-std", "async-trait", "bitflags", - "bs58 0.3.1", + "bs58", "bytes 0.5.6", "derive_more", "either", @@ -7227,10 +7101,10 @@ dependencies = [ "linked-hash-map", "linked_hash_set", "log", - "lru 0.4.3", + "lru", "nohash-hasher", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "pin-project 0.4.27", "prost", "prost-build", @@ -7243,7 +7117,7 @@ dependencies = [ "serde_json", "slog", "slog_derive", - "smallvec 0.6.13", + "smallvec 1.5.0", "sp-arithmetic", "sp-blockchain", "sp-consensus", @@ -7258,7 +7132,7 @@ dependencies = [ "substrate-test-runtime-client", "tempfile", "thiserror", - "unsigned-varint 0.4.0", + "unsigned-varint", "void", "wasm-timer", "zeroize", @@ -7273,7 +7147,7 @@ dependencies = [ "futures-timer 3.0.2", "libp2p", "log", - "lru 0.4.3", + "lru", "quickcheck", "rand 0.7.3", "sc-network", @@ -7377,11 +7251,13 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.10.2", "sc-block-builder", + "sc-cli", "sc-client-api", "sc-executor", "sc-keystore", "sc-network", "sc-rpc-api", + "sc-tracing", "sc-transaction-pool", "serde_json", "sp-api", @@ -7453,7 +7329,7 @@ dependencies = [ "sp-sandbox", "sp-std", "sp-tasks", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] @@ -7461,8 +7337,7 @@ name = "sc-service" version = "0.8.0" dependencies = [ "async-std", - "derive_more", - "directories", + "directories 3.0.1", "exit-future", "futures 0.1.30", "futures 0.3.8", @@ -7520,6 +7395,7 @@ dependencies = [ "substrate-test-runtime", "substrate-test-runtime-client", "tempfile", + "thiserror", "tokio 0.2.23", "tracing", "tracing-futures", @@ -7573,6 +7449,7 @@ dependencies = [ "parking_lot 0.10.2", "sc-client-api", "sp-core", + "thiserror", ] [[package]] @@ -7591,6 +7468,7 @@ dependencies = [ "serde_json", "sp-blockchain", "sp-runtime", + "thiserror", ] [[package]] @@ -7617,9 +7495,13 @@ dependencies = [ name = "sc-tracing" version = "2.0.0" 
dependencies = [ + "ansi_term 0.12.1", "erased-serde", + "lazy_static", "log", + "once_cell", "parking_lot 0.10.2", + "regex", "rustc-hash", "sc-telemetry", "serde", @@ -7628,6 +7510,7 @@ dependencies = [ "sp-tracing", "tracing", "tracing-core", + "tracing-log", "tracing-subscriber", ] @@ -7652,6 +7535,7 @@ dependencies = [ "sp-transaction-pool", "sp-utils", "substrate-test-runtime", + "thiserror", "wasm-timer", ] @@ -7660,7 +7544,6 @@ name = "sc-transaction-pool" version = "2.0.0" dependencies = [ "assert_matches", - "derive_more", "futures 0.3.8", "futures-diagnose", "hex", @@ -7684,6 +7567,7 @@ dependencies = [ "substrate-prometheus-endpoint", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", + "thiserror", "wasm-timer", ] @@ -7706,10 +7590,11 @@ dependencies = [ "arrayref", "arrayvec 0.5.2", "curve25519-dalek 2.1.0", - "getrandom 0.1.15", + "getrandom", "merlin", "rand 0.7.3", "rand_core 0.5.1", + "serde", "sha2 0.8.2", "subtle 2.3.0", "zeroize", @@ -7759,9 +7644,9 @@ dependencies = [ [[package]] name = "secrecy" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9182278ed645df3477a9c27bfee0621c621aa16f6972635f7f795dae3d81070f" +checksum = "0673d6a6449f5e7d12a1caf424fd9363e2af3a4953023ed455e3c4beef4597c0" dependencies = [ "zeroize", ] @@ -7795,7 +7680,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a3186ec9e65071a2095434b1f5bb24838d4e8e130f584c790f6033c79943537" dependencies = [ - "semver-parser", + "semver-parser 0.7.0", ] [[package]] @@ -7804,7 +7689,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser", + "semver-parser 0.7.0", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser 0.10.1", "serde", ] @@ -7815,10 +7709,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] -name = "send_wrapper" -version = "0.2.0" +name = "semver-parser" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0eddf2e8f50ced781f288c19f18621fa72a3779e3cb58dbf23b07469b0abeb4" +checksum = "42ef146c2ad5e5f4b037cd6ce2ebb775401729b19a82040c1beac9d36c7d1428" +dependencies = [ + "pest", +] [[package]] name = "send_wrapper" @@ -7923,19 +7820,6 @@ dependencies = [ "opaque-debug 0.3.0", ] -[[package]] -name = "sha3" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd26bc0e7a2e3a7c959bc494caf58b72ee0c71d67704e9520f736ca7e4853ecf" -dependencies = [ - "block-buffer 0.7.3", - "byte-tools", - "digest 0.8.1", - "keccak", - "opaque-debug 0.2.3", -] - [[package]] name = "sha3" version = "0.9.1" @@ -7979,6 +7863,18 @@ version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29f060a7d147e33490ec10da418795238fd7545bba241504d6b31a409f2e6210" +[[package]] +name = "simba" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb931b1367faadea6b1ab1c306a860ec17aaa5fa39f367d0c744e69d971a1fb2" +dependencies = [ + "approx", + "num-complex", + "num-traits", + "paste 0.1.18", +] + [[package]] 
name = "slab" version = "0.4.2" @@ -8064,11 +7960,11 @@ dependencies = [ [[package]] name = "socket2" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fd8b795c389288baa5f355489c65e71fd48a02104600d15c4cfbc561e9e429d" +checksum = "2c29947abdee2a218277abeca306f25789c938e500ea5a9d4b12a5a504466902" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "redox_syscall", "winapi 0.3.9", @@ -8094,11 +7990,11 @@ dependencies = [ name = "sp-allocator" version = "2.0.0" dependencies = [ - "derive_more", "log", "sp-core", "sp-std", "sp-wasm-interface", + "thiserror", ] [[package]] @@ -8114,6 +8010,7 @@ dependencies = [ "sp-std", "sp-test-primitives", "sp-version", + "thiserror", ] [[package]] @@ -8232,11 +8129,12 @@ dependencies = [ name = "sp-blockchain" version = "2.0.0" dependencies = [ + "futures 0.3.8", "log", - "lru 0.4.3", + "lru", "parity-scale-codec", "parking_lot 0.10.2", - "sp-block-builder", + "sp-api", "sp-consensus", "sp-database", "sp-runtime", @@ -8489,6 +8387,7 @@ dependencies = [ "rand 0.7.3", "rand_chacha 0.2.2", "schnorrkel", + "serde", "sp-core", "sp-externalities", ] @@ -8544,7 +8443,6 @@ name = "sp-panic-handler" version = "2.0.0" dependencies = [ "backtrace", - "log", ] [[package]] @@ -8573,7 +8471,6 @@ dependencies = [ "sp-application-crypto", "sp-arithmetic", "sp-core", - "sp-inherents", "sp-io", "sp-state-machine", "sp-std", @@ -8583,6 +8480,7 @@ dependencies = [ name = "sp-runtime-interface" version = "2.0.0" dependencies = [ + "impl-trait-for-tuples", "parity-scale-codec", "primitive-types", "rustversion", @@ -8635,7 +8533,7 @@ dependencies = [ "sp-io", "sp-runtime-interface", "sp-std", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] @@ -8646,7 +8544,7 @@ dependencies = [ "sp-io", "sp-runtime-interface", "sp-std", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] @@ -8794,6 +8692,7 @@ dependencies = [ "sp-api", "sp-blockchain", "sp-runtime", + "thiserror", ] [[package]] @@ -8866,11 +8765,11 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "statrs" -version = "0.10.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10102ac8d55e35db2b3fafc26f81ba8647da2e15879ab686a67e6d19af2685e8" +checksum = "cce16f6de653e88beca7bd13780d08e09d4489dbca1f9210e041bc4852481382" dependencies = [ - "rand 0.5.6", + "rand 0.7.3", ] [[package]] @@ -9132,7 +9031,7 @@ dependencies = [ "sp-trie", "sp-version", "substrate-test-runtime-client", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", "trie-db", ] @@ -9202,24 +9101,18 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" -version = "2.0.1" +version = "3.0.0" dependencies = [ "ansi_term 0.12.1", "atty", "build-helper", "cargo_metadata", - "fs2", - "itertools 0.8.2", "tempfile", "toml", "walkdir", "wasm-gc-api", ] -[[package]] -name = "substrate-wasm-builder-runner" -version = "2.0.0" - [[package]] name = "subtle" version = "1.0.0" @@ -9453,7 +9346,7 @@ dependencies = [ "mio", "mio-uds", "num_cpus", - "pin-project-lite", + "pin-project-lite 0.1.11", "signal-hook-registry", "slab", "tokio-macros", @@ -9574,7 +9467,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" dependencies = [ "futures-core", - "rustls", + "rustls 0.18.1", "tokio 0.2.23", "webpki", ] @@ -9684,7 +9577,7 @@ dependencies = [ 
"futures-core", "futures-sink", "log", - "pin-project-lite", + "pin-project-lite 0.1.11", "tokio 0.2.23", ] @@ -9705,13 +9598,13 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27" +checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "log", - "pin-project-lite", + "pin-project-lite 0.2.0", "tracing-attributes", "tracing-core", ] @@ -9843,16 +9736,6 @@ dependencies = [ "keccak-hasher", ] -[[package]] -name = "triehash" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f490aa7aa4e4d07edeba442c007e42e3e7f43aafb5112c5b047fff0b1aa5449c" -dependencies = [ - "hash-db", - "rlp", -] - [[package]] name = "try-lock" version = "0.2.3" @@ -9862,8 +9745,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "trybuild" version = "1.0.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7d30fe369fd650072b352b1a9cb9587669de6b89be3b8225544012c1c45292d" +source = "git+https://github.com/bkchr/trybuild.git?branch=bkchr-use-workspace-cargo-lock#0eaad05ba8a32a743751ff52b57a7d9f57da4869" dependencies = [ "dissimilar", "glob", @@ -9964,18 +9846,6 @@ dependencies = [ "subtle 2.3.0", ] -[[package]] -name = "unsigned-varint" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "669d776983b692a906c881fcd0cfb34271a48e197e4d6cb8df32b05bfc3d3fa5" -dependencies = [ - "bytes 0.5.6", - "futures-io", - "futures-util", - "futures_codec", -] - [[package]] name = "unsigned-varint" version = "0.5.1" @@ -10135,9 +10005,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95f8d235a77f880bcef268d379810ea6c0af2eacfa90b1ad5af731776e0c4699" +checksum = "b7866cab0aa01de1edf8b5d7936938a7e397ee50ce24119aef3e1eaa3b6171da" dependencies = [ "cfg-if 0.1.10", "js-sys", @@ -10176,9 +10046,9 @@ checksum = "1d649a3145108d7d3fbcde896a468d1bd636791823c9921135218ad89be08307" [[package]] name = "wasm-bindgen-test" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d92df9d5715606f9e48f85df3b78cb77ae44a2ea9a5f2a785a97bd0066b9300" +checksum = "34d1cdc8b98a557f24733d50a1199c4b0635e465eecba9c45b214544da197f64" dependencies = [ "console_error_panic_hook", "js-sys", @@ -10190,9 +10060,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51611ce8e84cba89379d91fc5074bacc5530f69da1c09a2853d906129d12b3b8" +checksum = "e8fb9c67be7439ee8ab1b7db502a49c05e51e2835b66796c705134d9b8e1a585" dependencies = [ "proc-macro2", "quote", @@ -10211,15 +10081,14 @@ dependencies = [ [[package]] name = "wasm-timer" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324c5e65a08699c9c4334ba136597ab22b85dccd4b65dd1e36ccf8f723a95b54" +checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" dependencies = [ "futures 0.3.8", "js-sys", - "parking_lot 0.9.0", + "parking_lot 0.11.1", "pin-utils", 
- "send_wrapper 0.2.0", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -10316,7 +10185,7 @@ dependencies = [ "cranelift-entity", "cranelift-frontend", "cranelift-wasm", - "directories", + "directories 2.0.2", "errno", "file-per-thread-logger", "indexmap", @@ -10456,9 +10325,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f20dea7535251981a9670857150d571846545088359b28e4951d350bdaf179f" +checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376" dependencies = [ "webpki", ] @@ -10561,9 +10430,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f33972566adbd2d3588b0491eb94b98b43695c4ef897903470ede4f3f5a28a" +checksum = "81a974bcdd357f0dca4d41677db03436324d45a4c9ed2d0b873a5a360ce41c36" dependencies = [ "zeroize_derive", ] diff --git a/Cargo.toml b/Cargo.toml index b78c4da05580185bdd307e59955cfc3278f36c56..61282189da38a0445a1aeaad16979aeba6f44f74 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,19 +1,19 @@ [workspace] members = [ "bin/node-template/node", - "bin/node-template/runtime", "bin/node-template/pallets/template", + "bin/node-template/runtime", "bin/node/bench", "bin/node/browser-testing", "bin/node/cli", "bin/node/executor", "bin/node/primitives", - "bin/node/rpc-client", "bin/node/rpc", + "bin/node/rpc-client", "bin/node/runtime", "bin/node/testing", - "bin/utils/subkey", "bin/utils/chain-spec-builder", + "bin/utils/subkey", "client/api", "client/authority-discovery", "client/basic-authorship", @@ -26,56 +26,53 @@ members = [ "client/consensus/babe", "client/consensus/babe/rpc", "client/consensus/common", + "client/consensus/epochs", "client/consensus/manual-seal", "client/consensus/pow", - "client/consensus/uncles", "client/consensus/slots", - "client/consensus/epochs", + "client/consensus/uncles", "client/db", "client/executor", "client/executor/common", + "client/executor/runtime-test", "client/executor/wasmi", "client/executor/wasmtime", - "client/executor/runtime-test", "client/finality-grandpa", "client/informant", - "client/light", - "client/tracing", "client/keystore", + "client/light", "client/network", - "client/network/test", "client/network-gossip", + "client/network/test", "client/offchain", "client/peerset", "client/proposer-metrics", - "client/rpc-servers", "client/rpc", "client/rpc-api", + "client/rpc-servers", "client/service", "client/service/test", "client/state-db", "client/sync-state-rpc", "client/telemetry", + "client/tracing", "client/transaction-pool", "client/transaction-pool/graph", - "utils/prometheus", - "utils/wasm-builder-runner", "frame/assets", - "frame/aura", "frame/atomic-swap", + "frame/aura", "frame/authority-discovery", "frame/authorship", "frame/babe", "frame/balances", "frame/benchmarking", + "frame/bounties", "frame/collective", "frame/contracts", "frame/contracts/rpc", "frame/contracts/rpc/runtime-api", "frame/democracy", - "frame/elections-phragmen", "frame/elections", - "frame/evm", "frame/example", "frame/example-offchain-worker", "frame/example-parallel", @@ -85,6 +82,7 @@ members = [ "frame/im-online", "frame/indices", "frame/membership", + "frame/merkle-mountain-range", "frame/metadata", "frame/multisig", "frame/nicks", @@ -99,8 +97,8 @@ members = [ "frame/session/benchmarking", "frame/society", "frame/staking", - "frame/staking/reward-curve", "frame/staking/fuzzer", + 
"frame/staking/reward-curve", "frame/sudo", "frame/support", "frame/support/procedural", @@ -115,62 +113,63 @@ members = [ "frame/transaction-payment/rpc", "frame/transaction-payment/rpc/runtime-api", "frame/treasury", + "frame/tips", "frame/utility", "frame/vesting", "primitives/allocator", + "primitives/api", + "primitives/api/proc-macro", + "primitives/api/test", "primitives/application-crypto", "primitives/application-crypto/test", + "primitives/arithmetic", + "primitives/arithmetic/fuzzer", "primitives/authority-discovery", "primitives/authorship", "primitives/block-builder", "primitives/blockchain", + "primitives/chain-spec", "primitives/consensus/aura", "primitives/consensus/babe", "primitives/consensus/common", "primitives/consensus/pow", "primitives/consensus/vrf", "primitives/core", - "primitives/chain-spec", "primitives/database", "primitives/debug-derive", - "primitives/storage", "primitives/externalities", "primitives/finality-grandpa", "primitives/inherents", + "primitives/io", "primitives/keyring", "primitives/keystore", - "primitives/offchain", - "primitives/panic-handler", "primitives/npos-elections", - "primitives/npos-elections/fuzzer", "primitives/npos-elections/compact", + "primitives/npos-elections/fuzzer", + "primitives/offchain", + "primitives/panic-handler", "primitives/rpc", + "primitives/runtime", "primitives/runtime-interface", "primitives/runtime-interface/proc-macro", + "primitives/runtime-interface/test", "primitives/runtime-interface/test-wasm", "primitives/runtime-interface/test-wasm-deprecated", - "primitives/runtime-interface/test", + "primitives/sandbox", "primitives/serializer", "primitives/session", - "primitives/api", - "primitives/api/proc-macro", - "primitives/api/test", - "primitives/arithmetic", - "primitives/arithmetic/fuzzer", - "primitives/io", - "primitives/runtime", - "primitives/sandbox", "primitives/staking", - "primitives/std", - "primitives/version", "primitives/state-machine", + "primitives/std", + "primitives/storage", "primitives/tasks", - "primitives/timestamp", "primitives/test-primitives", - "primitives/transaction-pool", + "primitives/timestamp", "primitives/tracing", + "primitives/transaction-pool", "primitives/trie", "primitives/utils", + "primitives/version", "primitives/wasm-interface", "test-utils/client", "test-utils/derive", @@ -185,6 +184,7 @@ members = [ "utils/frame/frame-utilities-cli", "utils/frame/rpc/support", "utils/frame/rpc/system", + "utils/prometheus", "utils/wasm-builder", ] @@ -209,7 +209,6 @@ aesni = { opt-level = 3 } blake2 = { opt-level = 3 } blake2-rfc = { opt-level = 3 } blake2b_simd = { opt-level = 3 } -blake2s_simd = { opt-level = 3 } chacha20poly1305 = { opt-level = 3 } cranelift-codegen = { opt-level = 3 } cranelift-wasm = { opt-level = 3 } @@ -219,8 +218,6 @@ crossbeam-queue = { opt-level = 3 } crypto-mac = { opt-level = 3 } curve25519-dalek = { opt-level = 3 } ed25519-dalek = { opt-level = 3 } -evm-core = { opt-level = 3 } -evm-runtime = { opt-level = 3 } flate2 = { opt-level = 3 } futures-channel = { opt-level = 3 } hashbrown = { opt-level = 3 } diff --git a/bin/node-template/README.md b/bin/node-template/README.md index c1aeefe895095a4bd34de39b5d447615f212f06f..8c8b82a14bb86975bda14410d6313500b0388cbe 100644 --- a/bin/node-template/README.md +++ b/bin/node-template/README.md @@ -157,7 +157,7 @@ Review the [FRAME runtime implementation](./runtime/src/lib.rs) included in this the following: - This file configures several pallets to include in the runtime. 
Each pallet configuration is - defined by a code block that begins with `impl $PALLET_NAME::Trait for Runtime`. + defined by a code block that begins with `impl $PALLET_NAME::Config for Runtime`. - The pallets are composed into a single runtime by way of the [`construct_runtime!`](https://crates.parity.io/frame_support/macro.construct_runtime.html) macro, which is part of the core @@ -181,8 +181,8 @@ A FRAME pallet is compromised of a number of blockchain primitives: - Events: Substrate uses [events](https://substrate.dev/docs/en/knowledgebase/runtime/events) to notify users of important changes in the runtime. - Errors: When a dispatchable fails, it returns an error. -- Trait: The `Trait` configuration interface is used to define the types and parameters upon which - a FRAME pallet depends. +- Config: The `Config` configuration interface is used to define the types and parameters upon + which a FRAME pallet depends. ## Generate a Custom Node Template diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index d2b5a35b352b25d3b89788cd31727cdae94b5cfc..38cdaa1eea48c7b7ef257474ef5d73842e20ac84 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -22,6 +22,7 @@ sc-cli = { version = "0.8.0", path = "../../../client/cli", features = ["wasmtim sp-core = { version = "2.0.0", path = "../../../primitives/core" } sc-executor = { version = "0.8.0", path = "../../../client/executor", features = ["wasmtime"] } sc-service = { version = "0.8.0", path = "../../../client/service", features = ["wasmtime"] } +sc-keystore = { version = "2.0.0", path = "../../../client/keystore" } sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } diff --git a/bin/node-template/node/src/cli.rs b/bin/node-template/node/src/cli.rs index f2faf17e4ddf44b724292d014d542b63b594b56f..947123a6bbf5b927edc33c4a3d6d370cee359d28 100644 --- a/bin/node-template/node/src/cli.rs +++ b/bin/node-template/node/src/cli.rs @@ -12,6 +12,8 @@ pub struct Cli { #[derive(Debug, StructOpt)] pub enum Subcommand { + /// Key management cli utilities + Key(sc_cli::KeySubcommand), /// Build a chain specification. 
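The README hunk above renames the pallet configuration interface from `Trait` to `Config`. As a hedged, abridged sketch (not compilable on its own), the renamed pattern, in the condensed form it takes in the node-template changes later in this diff, looks like this:

```rust
// Pallet side: the configuration trait now extends `frame_system::Config`.
pub trait Config: frame_system::Config {
    /// Because this pallet emits events, it depends on the runtime's event type.
    type Event: From<Event<Self>> + Into<<Self as frame_system::Config>::Event>;
}

// Runtime side: each pallet is wired in with an `impl $PALLET_NAME::Config for Runtime`
// block, and the pallets are then composed with `construct_runtime!`.
impl template::Config for Runtime {
    type Event = Event;
}
```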
BuildSpec(sc_cli::BuildSpecCmd), diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index ac950b50483acafde212f4cfa7d4bcff34c5ca55..5c41643a2932f67479ec5cf0529a48248f7f5b1f 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -66,6 +66,7 @@ pub fn run() -> sc_cli::Result<()> { let cli = Cli::from_args(); match &cli.subcommand { + Some(Subcommand::Key(cmd)) => cmd.run(), Some(Subcommand::BuildSpec(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index d85de7c840dfd04c60f212e19ae947520ce0c87b..7e1939fb023a8082d0abb147902b27b484f1fe35 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -9,7 +9,8 @@ use sp_inherents::InherentDataProviders; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; -use sc_finality_grandpa::{FinalityProofProvider as GrandpaFinalityProofProvider, SharedVoterState}; +use sc_finality_grandpa::SharedVoterState; +use sc_keystore::LocalKeystore; // Our native executor instance. native_executor_instance!( @@ -37,6 +38,10 @@ pub fn new_partial(config: &Configuration) -> Result ) >, ServiceError> { + if config.keystore_remote.is_some() { + return Err(ServiceError::Other( + format!("Remote Keystores are not supported."))) + } let inherent_data_providers = sp_inherents::InherentDataProviders::new(); let (client, backend, keystore_container, task_manager) = @@ -64,7 +69,6 @@ pub fn new_partial(config: &Configuration) -> Result Result Result, &'static str> { + // FIXME: here would the concrete keystore be built, + // must return a concrete type (NOT `LocalKeystore`) that + // implements `CryptoStore` and `SyncCryptoStore` + Err("Remote Keystore not supported.") +} + /// Builds a new service for a full client. -pub fn new_full(config: Configuration) -> Result { +pub fn new_full(mut config: Configuration) -> Result { let sc_service::PartialComponents { - client, backend, mut task_manager, import_queue, keystore_container, + client, backend, mut task_manager, import_queue, mut keystore_container, select_chain, transaction_pool, inherent_data_providers, other: (block_import, grandpa_link), } = new_partial(&config)?; - let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); + if let Some(url) = &config.keystore_remote { + match remote_keystore(url) { + Ok(k) => keystore_container.set_remote_keystore(k), + Err(e) => { + return Err(ServiceError::Other( + format!("Error hooking up remote keystore for {}: {}", url, e))) + } + }; + } + config.network.notifications_protocols.push(sc_finality_grandpa::GRANDPA_PROTOCOL_NAME.into()); let (network, network_status_sinks, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { @@ -99,8 +118,6 @@ pub fn new_full(config: Configuration) -> Result { import_queue, on_demand: None, block_announce_validator_builder: None, - finality_proof_request_builder: None, - finality_proof_provider: Some(finality_proof_provider.clone()), })?; if config.offchain_worker.enabled { @@ -216,8 +233,6 @@ pub fn new_full(config: Configuration) -> Result { "grandpa-voter", sc_finality_grandpa::run_grandpa_voter(grandpa_config)? 
); - } else { - sc_finality_grandpa::setup_disabled_grandpa(network)?; } network_starter.start_network(); @@ -225,10 +240,14 @@ pub fn new_full(config: Configuration) -> Result { } /// Builds a new service for a light client. -pub fn new_light(config: Configuration) -> Result { +pub fn new_light(mut config: Configuration) -> Result { let (client, backend, keystore_container, mut task_manager, on_demand) = sc_service::new_light_parts::(&config)?; + config.network.notifications_protocols.push(sc_finality_grandpa::GRANDPA_PROTOCOL_NAME.into()); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( config.transaction_pool.clone(), config.prometheus_registry(), @@ -237,19 +256,21 @@ pub fn new_light(config: Configuration) -> Result { on_demand.clone(), )); - let grandpa_block_import = sc_finality_grandpa::light_block_import( - client.clone(), backend.clone(), &(client.clone() as Arc<_>), - Arc::new(on_demand.checker().clone()) as Arc<_>, + let (grandpa_block_import, _) = sc_finality_grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), )?; - let finality_proof_import = grandpa_block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); + + let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( + grandpa_block_import.clone(), + client.clone(), + ); let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( sc_consensus_aura::slot_duration(&*client)?, - grandpa_block_import, - None, - Some(Box::new(finality_proof_import)), + aura_block_import, + Some(Box::new(grandpa_block_import)), client.clone(), InherentDataProviders::new(), &task_manager.spawn_handle(), @@ -257,9 +278,6 @@ pub fn new_light(config: Configuration) -> Result { sp_consensus::NeverCanAuthor, )?; - let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); - let (network, network_status_sinks, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, @@ -269,8 +287,6 @@ pub fn new_light(config: Configuration) -> Result { import_queue, on_demand: Some(on_demand.clone()), block_announce_validator_builder: None, - finality_proof_request_builder: Some(finality_proof_request_builder), - finality_proof_provider: Some(finality_proof_provider), })?; if config.offchain_worker.enabled { diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 729a71278aa9f1c2f37760274b547253799871fb..24de4f2f50dd5cc76678ee0931c02cd5cab3cd31 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -14,9 +14,9 @@ mod mock; mod tests; /// Configure the pallet by specifying the parameters and types on which it depends. -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// Because this pallet emits events, it depends on the runtime's definition of an event. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; } // The pallet's runtime storage items. @@ -25,7 +25,7 @@ decl_storage! { // A unique name is used to ensure that the pallet's storage items are isolated. // This name may be updated, but each pallet in the runtime must use a unique name. 
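In the `decl_storage!`, `decl_event!`, and `decl_error!` hunks that follow, the angle-bracketed generic parameters (for example `<T: Config>` and `Option<u32>`) appear to have been stripped by the plain-text rendering. A hedged reconstruction of the conventional post-rename forms of these macro invocations in the template pallet:

```rust
decl_storage! {
    // The `<T: Config>` parameter is part of the macro invocation; it is restated here
    // because angle brackets are easily lost when the diff is rendered as plain text.
    trait Store for Module<T: Config> as TemplateModule {
        Something get(fn something): Option<u32>;
    }
}

decl_event!(
    pub enum Event<T> where AccountId = <T as frame_system::Config>::AccountId {
        /// [something, who]
        SomethingStored(u32, AccountId),
    }
);

decl_error! {
    pub enum Error for Module<T: Config> {
        NoneValue,
        StorageOverflow,
    }
}
```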
// ---------------------------------vvvvvvvvvvvvvv - trait Store for Module as TemplateModule { + trait Store for Module as TemplateModule { // Learn more about declaring storage items: // https://substrate.dev/docs/en/knowledgebase/runtime/storage#declaring-storage-items Something get(fn something): Option; @@ -35,7 +35,7 @@ decl_storage! { // Pallets use events to inform users when important changes are made. // https://substrate.dev/docs/en/knowledgebase/runtime/events decl_event!( - pub enum Event where AccountId = ::AccountId { + pub enum Event where AccountId = ::AccountId { /// Event documentation should end with an array that provides descriptive names for event /// parameters. [something, who] SomethingStored(u32, AccountId), @@ -44,7 +44,7 @@ decl_event!( // Errors inform users that something went wrong. decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Error names should be descriptive. NoneValue, /// Errors should have helpful documentation associated with them. @@ -56,7 +56,7 @@ decl_error! { // These functions materialize as "extrinsics", which are often compared to transactions. // Dispatchable functions must be annotated with a weight and must return a DispatchResult. decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { // Errors must be initialized if they are used by the pallet. type Error = Error; diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index a3dff240e4847d518343507f084d6ca4339f7a43..84af63a1c3bb8c0a5b21b33511385d0ff5f1be0a 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -1,8 +1,8 @@ -use crate::{Module, Trait}; +use crate::{Module, Config}; use sp_core::H256; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; +use frame_support::{impl_outer_origin, parameter_types}; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, testing::Header, Perbill, + traits::{BlakeTwo256, IdentityLookup}, testing::Header, }; use frame_system as system; @@ -16,13 +16,13 @@ impl_outer_origin! { pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); } -impl system::Trait for Test { +impl system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = (); type Index = u64; @@ -34,13 +34,6 @@ impl system::Trait for Test { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = (); @@ -49,7 +42,7 @@ impl system::Trait for Test { type SystemWeightInfo = (); } -impl Trait for Test { +impl Config for Test { type Event = (); } diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index ed5a114b813f814e05688a2900dbfa1b288dd7d8..f1b15070ddde90b3f159ae13a49481c3a189a379 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -48,7 +48,7 @@ hex-literal = { version = "0.3.1", optional = true } template = { version = "2.0.0", default-features = false, path = "../pallets/template", package = "pallet-template" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "3.0.0", path = "../../../utils/wasm-builder" } [features] default = ["std"] diff --git a/bin/node-template/runtime/build.rs b/bin/node-template/runtime/build.rs index 9654139121f6f50e2c649a28e79852c8598e630f..9b53d2457dffdc09ea4789b644e6d2b2a3cfc0f5 100644 --- a/bin/node-template/runtime/build.rs +++ b/bin/node-template/runtime/build.rs @@ -1,9 +1,8 @@ -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates("2.0.1") .export_heap_base() .import_memory() .build() diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index aadfd931cdb507598cbe2683c138c8d7661285f9..51df3dd5a3e4507e2ca51435a5126d0c4941b245 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -13,7 +13,7 @@ use sp_runtime::{ transaction_validity::{TransactionValidity, TransactionSource}, }; use sp_runtime::traits::{ - BlakeTwo256, Block as BlockT, AccountIdLookup, Verify, IdentifyAccount, NumberFor, Saturating, + BlakeTwo256, Block as BlockT, AccountIdLookup, Verify, IdentifyAccount, NumberFor, }; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -126,23 +126,27 @@ pub fn native_version() -> NativeVersion { } } +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); + parameter_types! { + pub const Version: RuntimeVersion = VERSION; pub const BlockHashCount: BlockNumber = 2400; /// We allow for 2 seconds of compute with a 6 second average block time. - pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - /// Assume 10% of weight for average on_initialize calls. 
- pub MaximumExtrinsicWeight: Weight = AvailableBlockRatio::get() - .saturating_sub(Perbill::from_percent(10)) * MaximumBlockWeight::get(); - pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; - pub const Version: RuntimeVersion = VERSION; + pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights + ::with_sensible_defaults(2 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); + pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength + ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); } // Configure FRAME pallets to include in runtime. -impl frame_system::Trait for Runtime { +impl frame_system::Config for Runtime { /// The basic call filter to use in dispatchable. type BaseCallFilter = (); + /// Block & extrinsics weights: base values and limits. + type BlockWeights = BlockWeights; + /// The maximum length of a block (in bytes). + type BlockLength = BlockLength; /// The identifier used to distinguish between accounts. type AccountId = AccountId; /// The aggregated dispatch type that is available for extrinsics. @@ -165,24 +169,8 @@ impl frame_system::Trait for Runtime { type Origin = Origin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; - /// Maximum weight of each block. - type MaximumBlockWeight = MaximumBlockWeight; /// The weight of database operations that the runtime can invoke. type DbWeight = RocksDbWeight; - /// The weight of the overhead invoked on the block import process, independent of the - /// extrinsics included in that block. - type BlockExecutionWeight = BlockExecutionWeight; - /// The base weight of any extrinsic processed by the runtime, independent of the - /// logic of that extrinsic. (Signature verification, nonce increment, fee, etc...) - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - /// The maximum weight that a single extrinsic of `Normal` dispatch class can have, - /// idependent of the logic of that extrinsics. (Roughly max block weight - average on - /// initialize cost). - type MaximumExtrinsicWeight = MaximumExtrinsicWeight; - /// Maximum size of all encoded transactions (in bytes) that are allowed in one block. - type MaximumBlockLength = MaximumBlockLength; - /// Portion of the block weight that is available to all normal transactions. - type AvailableBlockRatio = AvailableBlockRatio; /// Version of the runtime. type Version = Version; /// Converts a module to the index of the module in `construct_runtime!`. @@ -199,11 +187,11 @@ impl frame_system::Trait for Runtime { type SystemWeightInfo = (); } -impl pallet_aura::Trait for Runtime { +impl pallet_aura::Config for Runtime { type AuthorityId = AuraId; } -impl pallet_grandpa::Trait for Runtime { +impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; @@ -226,7 +214,7 @@ parameter_types! { pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } -impl pallet_timestamp::Trait for Runtime { +impl pallet_timestamp::Config for Runtime { /// A timestamp: milliseconds since the unix epoch. type Moment = u64; type OnTimestampSet = Aura; @@ -239,7 +227,7 @@ parameter_types! { pub const MaxLocks: u32 = 50; } -impl pallet_balances::Trait for Runtime { +impl pallet_balances::Config for Runtime { type MaxLocks = MaxLocks; /// The type for recording an account's balance. type Balance = Balance; @@ -255,20 +243,20 @@ parameter_types! 
{ pub const TransactionByteFee: Balance = 1; } -impl pallet_transaction_payment::Trait for Runtime { +impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } -impl pallet_sudo::Trait for Runtime { +impl pallet_sudo::Config for Runtime { type Event = Event; type Call = Call; } /// Configure the pallet template in pallets/template. -impl template::Trait for Runtime { +impl template::Config for Runtime { type Event = Event; } @@ -457,7 +445,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; use frame_system_benchmarking::Module as SystemBench; - impl frame_system_benchmarking::Trait for Runtime {} + impl frame_system_benchmarking::Config for Runtime {} let whitelist: Vec = vec![ // Block Number diff --git a/bin/node/browser-testing/Cargo.toml b/bin/node/browser-testing/Cargo.toml index 3ca5a3feaee9604c1efae3df096b64a660a45fd3..f60dc55b6f7e6c9643097a923b870e1961abc447 100644 --- a/bin/node/browser-testing/Cargo.toml +++ b/bin/node/browser-testing/Cargo.toml @@ -8,13 +8,13 @@ license = "Apache-2.0" [dependencies] futures-timer = "3.0.2" -libp2p = { version = "0.30.1", default-features = false } +libp2p = { version = "0.32.2", default-features = false } jsonrpc-core = "15.0.0" serde = "1.0.106" serde_json = "1.0.48" wasm-bindgen = { version = "=0.2.68", features = ["serde-serialize"] } -wasm-bindgen-futures = "0.4.10" -wasm-bindgen-test = "0.3.17" +wasm-bindgen-futures = "0.4.18" +wasm-bindgen-test = "0.3.18" futures = "0.3.4" node-cli = { path = "../cli", default-features = false, features = ["browser"] , version = "2.0.0"} diff --git a/bin/node/browser-testing/src/lib.rs b/bin/node/browser-testing/src/lib.rs index 777e5ea9f132e44d888ce8ea68b8d9e8fdf44655..f4dc0908567828eb7cd658b76cb27d8575274ad5 100644 --- a/bin/node/browser-testing/src/lib.rs +++ b/bin/node/browser-testing/src/lib.rs @@ -26,7 +26,7 @@ //! ``` //! For debug infomation, such as the informant, run without the `--headless` //! flag and open a browser to the url that `wasm-pack test` outputs. -//! For more infomation see https://rustwasm.github.io/docs/wasm-pack/. +//! For more infomation see . 
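The module documentation above explains that these browser tests are driven by `wasm-pack test`. Since the file's imports follow immediately below, a minimal, self-contained test in the same style (using the `wasm-bindgen-test` 0.3.18 API pinned in the Cargo.toml hunk above) may help as a reference:

```rust
// Minimal wasm-bindgen-test example in the style of this crate.
use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure};

// Run the tests in a headless browser rather than in Node.js.
wasm_bindgen_test_configure!(run_in_browser);

#[wasm_bindgen_test]
fn two_plus_two_is_four() {
    assert_eq!(2 + 2, 4);
}
```

Built and run with `wasm-pack test --headless --chrome` (or `--firefox`); dropping `--headless` opens the URL that `wasm-pack test` prints, as the documentation above notes.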
use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure}; use wasm_bindgen_futures::JsFuture; diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 26a23ce36ecc0fd6c6a5a4e61133551e242db7bf..6574ccb733b52a3d48a77e38be10e5b358bb328e 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -41,7 +41,7 @@ hex-literal = "0.3.1" log = "0.4.8" rand = "0.7.2" structopt = { version = "0.3.8", optional = true } -tracing = "0.1.19" +tracing = "0.1.22" parking_lot = "0.10.0" # primitives @@ -102,7 +102,7 @@ node-inspect = { version = "0.8.0", optional = true, path = "../inspect" } # WASM-specific dependencies wasm-bindgen = { version = "0.2.57", optional = true } -wasm-bindgen-futures = { version = "0.4.7", optional = true } +wasm-bindgen-futures = { version = "0.4.18", optional = true } browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.8.0"} [target.'cfg(target_arch="x86_64")'.dependencies] diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 90824a5572f12f9e50f19703e96f2a9c046c5e40..83dc95e3b64dffec313816a1d251935777538f5d 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -218,7 +218,7 @@ pub fn testnet_genesis( endowed_accounts: Option>, enable_println: bool, ) -> GenesisConfig { - let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(|| { + let mut endowed_accounts: Vec = endowed_accounts.unwrap_or_else(|| { vec![ get_account_id_from_seed::("Alice"), get_account_id_from_seed::("Bob"), @@ -234,10 +234,16 @@ pub fn testnet_genesis( get_account_id_from_seed::("Ferdie//stash"), ] }); + initial_authorities.iter().for_each(|x| + if !endowed_accounts.contains(&x.0) { + endowed_accounts.push(x.0.clone()) + } + ); + let num_endowed_accounts = endowed_accounts.len(); const ENDOWMENT: Balance = 10_000_000 * DOLLARS; - const STASH: Balance = 100 * DOLLARS; + const STASH: Balance = ENDOWMENT / 1000; GenesisConfig { frame_system: Some(SystemConfig { @@ -246,9 +252,8 @@ pub fn testnet_genesis( }), pallet_balances: Some(BalancesConfig { balances: endowed_accounts.iter().cloned() - .map(|k| (k, ENDOWMENT)) - .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) - .collect(), + .map(|x| (x, ENDOWMENT)) + .collect() }), pallet_indices: Some(IndicesConfig { indices: vec![], diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 3bc406b84fc67481b94535829b06dc372c258b84..5eb8e35e69ec5e36c6f282919905d855accdad24 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -22,7 +22,6 @@ use std::sync::Arc; use sc_consensus_babe; -use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use node_primitives::Block; use node_runtime::RuntimeApi; use sc_service::{ @@ -57,10 +56,7 @@ pub fn new_partial(config: &Configuration) -> Result, sc_consensus_babe::BabeLink, ), - ( - grandpa::SharedVoterState, - Arc>, - ), + grandpa::SharedVoterState, ) >, ServiceError> { let (client, backend, keystore_container, task_manager) = @@ -93,7 +89,6 @@ pub fn new_partial(config: &Configuration) -> Result Result, &sc_consensus_babe::BabeLink, @@ -181,7 +176,9 @@ pub fn new_full_base( other: (rpc_extensions_builder, import_setup, rpc_setup), } = new_partial(&config)?; - let (shared_voter_state, finality_proof_provider) = rpc_setup; + let shared_voter_state = rpc_setup; + + config.network.notifications_protocols.push(grandpa::GRANDPA_PROTOCOL_NAME.into()); let (network, network_status_sinks, 
system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { @@ -192,8 +189,6 @@ pub fn new_full_base( import_queue, on_demand: None, block_announce_validator_builder: None, - finality_proof_request_builder: None, - finality_proof_provider: Some(finality_proof_provider.clone()), })?; if config.offchain_worker.enabled { @@ -322,8 +317,6 @@ pub fn new_full_base( "grandpa-voter", grandpa::run_grandpa_voter(grandpa_config)? ); - } else { - grandpa::setup_disabled_grandpa(network.clone())?; } network_starter.start_network(); @@ -345,7 +338,7 @@ pub fn new_full(config: Configuration) }) } -pub fn new_light_base(config: Configuration) -> Result<( +pub fn new_light_base(mut config: Configuration) -> Result<( TaskManager, RpcHandlers, Arc, Arc::Hash>>, Arc>> @@ -353,6 +346,8 @@ pub fn new_light_base(config: Configuration) -> Result<( let (client, backend, keystore_container, mut task_manager, on_demand) = sc_service::new_light_parts::(&config)?; + config.network.notifications_protocols.push(grandpa::GRANDPA_PROTOCOL_NAME.into()); + let select_chain = sc_consensus::LongestChain::new(backend.clone()); let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( @@ -363,14 +358,12 @@ pub fn new_light_base(config: Configuration) -> Result<( on_demand.clone(), )); - let grandpa_block_import = grandpa::light_block_import( - client.clone(), backend.clone(), &(client.clone() as Arc<_>), - Arc::new(on_demand.checker().clone()), + let (grandpa_block_import, _) = grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), )?; - - let finality_proof_import = grandpa_block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); + let justification_import = grandpa_block_import.clone(); let (babe_block_import, babe_link) = sc_consensus_babe::block_import( sc_consensus_babe::Config::get_or_compute(&*client)?, @@ -383,8 +376,7 @@ pub fn new_light_base(config: Configuration) -> Result<( let import_queue = sc_consensus_babe::import_queue( babe_link, babe_block_import, - None, - Some(Box::new(finality_proof_import)), + Some(Box::new(justification_import)), client.clone(), select_chain.clone(), inherent_data_providers.clone(), @@ -393,9 +385,6 @@ pub fn new_light_base(config: Configuration) -> Result<( sp_consensus::NeverCanAuthor, )?; - let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); - let (network, network_status_sinks, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, @@ -405,8 +394,6 @@ pub fn new_light_base(config: Configuration) -> Result<( import_queue, on_demand: Some(on_demand.clone()), block_announce_validator_builder: None, - finality_proof_request_builder: Some(finality_proof_request_builder), - finality_proof_provider: Some(finality_proof_provider), })?; network_starter.start_network(); diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index a48efaea2d6958632b72979cd467b8c4ba5fad65..09438bfacd455ca73e8718d9785c1bef6329e208 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -27,7 +27,6 @@ use sp_runtime::{ traits::Hash as HashT, transaction_validity::InvalidTransaction, }; -use pallet_contracts::ContractAddressFor; use frame_system::{self, EventRecord, Phase}; use node_runtime::{ @@ -581,15 +580,15 @@ const CODE_TRANSFER: &str = r#" #[test] fn 
deploying_wasm_contract_should_work() { let transfer_code = wat::parse_str(CODE_TRANSFER).unwrap(); - let transfer_ch = ::Hashing::hash(&transfer_code); + let transfer_ch = ::Hashing::hash(&transfer_code); - let addr = ::DetermineContractAddress::contract_address_for( + let addr = pallet_contracts::Module::::contract_address( + &charlie(), &transfer_ch, &[], - &charlie(), ); - let subsistence = pallet_contracts::Config::::subsistence_threshold_uncached(); + let subsistence = pallet_contracts::ConfigCache::::subsistence_threshold_uncached(); let b = construct_block( &mut new_test_ext(compact_code_unwrap(), false), @@ -613,7 +612,8 @@ fn deploying_wasm_contract_should_work() { 1 * DOLLARS + subsistence, 500_000_000, transfer_ch, - Vec::new() + Vec::new(), + Vec::new(), ) ), }, diff --git a/bin/node/rpc-client/Cargo.toml b/bin/node/rpc-client/Cargo.toml index 26d9de133c6883c8e2404fd6d996985f336e09df..e88a18032698ebbe0636a82ffd394d36ed5361c7 100644 --- a/bin/node/rpc-client/Cargo.toml +++ b/bin/node/rpc-client/Cargo.toml @@ -12,7 +12,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.1.29" -hyper = "0.12.35" +hyper = "~0.12.35" jsonrpc-core-client = { version = "15.1.0", default-features = false, features = ["http"] } log = "0.4.8" node-primitives = { version = "2.0.0", path = "../primitives" } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 2bad2db510be4fae67b1c3bcaff5eb4803a9258b..4dabc5c0159210be654df67e3762d639ff8cd8ca 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -43,10 +43,12 @@ frame-support = { version = "2.0.0", default-features = false, path = "../../../ frame-system = { version = "2.0.0", default-features = false, path = "../../../frame/system" } frame-system-benchmarking = { version = "2.0.0", default-features = false, path = "../../../frame/system/benchmarking", optional = true } frame-system-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } +pallet-assets = { version = "2.0.0", default-features = false, path = "../../../frame/assets" } pallet-authority-discovery = { version = "2.0.0", default-features = false, path = "../../../frame/authority-discovery" } pallet-authorship = { version = "2.0.0", default-features = false, path = "../../../frame/authorship" } pallet-babe = { version = "2.0.0", default-features = false, path = "../../../frame/babe" } pallet-balances = { version = "2.0.0", default-features = false, path = "../../../frame/balances" } +pallet-bounties = { version = "2.0.0", default-features = false, path = "../../../frame/bounties" } pallet-collective = { version = "2.0.0", default-features = false, path = "../../../frame/collective" } pallet-contracts = { version = "2.0.0", default-features = false, path = "../../../frame/contracts" } pallet-contracts-primitives = { version = "2.0.0", default-features = false, path = "../../../frame/contracts/common/" } @@ -58,6 +60,7 @@ pallet-im-online = { version = "2.0.0", default-features = false, path = "../../ pallet-indices = { version = "2.0.0", default-features = false, path = "../../../frame/indices" } pallet-identity = { version = "2.0.0", default-features = false, path = "../../../frame/identity" } pallet-membership = { version = "2.0.0", default-features = false, path = "../../../frame/membership" } +pallet-mmr = { version = "2.0.0", default-features = false, path = "../../../frame/merkle-mountain-range" } pallet-multisig = { version = "2.0.0", default-features = false, path = 
"../../../frame/multisig" } pallet-offences = { version = "2.0.0", default-features = false, path = "../../../frame/offences" } pallet-offences-benchmarking = { version = "2.0.0", path = "../../../frame/offences/benchmarking", default-features = false, optional = true } @@ -72,6 +75,7 @@ pallet-scheduler = { version = "2.0.0", default-features = false, path = "../../ pallet-society = { version = "2.0.0", default-features = false, path = "../../../frame/society" } pallet-sudo = { version = "2.0.0", default-features = false, path = "../../../frame/sudo" } pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../../frame/timestamp" } +pallet-tips = { version = "2.0.0", default-features = false, path = "../../../frame/tips" } pallet-treasury = { version = "2.0.0", default-features = false, path = "../../../frame/treasury" } pallet-utility = { version = "2.0.0", default-features = false, path = "../../../frame/utility" } pallet-transaction-payment = { version = "2.0.0", default-features = false, path = "../../../frame/transaction-payment" } @@ -79,7 +83,7 @@ pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", default-featur pallet-vesting = { version = "2.0.0", default-features = false, path = "../../../frame/vesting" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "3.0.0", path = "../../../utils/wasm-builder" } [dev-dependencies] sp-io = { version = "2.0.0", path = "../../../primitives/io" } @@ -89,11 +93,13 @@ default = ["std"] with-tracing = [ "frame-executive/with-tracing" ] std = [ "sp-authority-discovery/std", + "pallet-assets/std", "pallet-authority-discovery/std", "pallet-authorship/std", "sp-consensus-babe/std", "pallet-babe/std", "pallet-balances/std", + "pallet-bounties/std", "sp-block-builder/std", "codec/std", "pallet-collective/std", @@ -108,6 +114,7 @@ std = [ "pallet-indices/std", "sp-inherents/std", "pallet-membership/std", + "pallet-mmr/std", "pallet-multisig/std", "pallet-identity/std", "pallet-scheduler/std", @@ -132,6 +139,7 @@ std = [ "frame-system-rpc-runtime-api/std", "frame-system/std", "pallet-timestamp/std", + "pallet-tips/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", "pallet-treasury/std", @@ -147,8 +155,10 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", "pallet-babe/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-bounties/runtime-benchmarks", "pallet-collective/runtime-benchmarks", "pallet-contracts/runtime-benchmarks", "pallet-democracy/runtime-benchmarks", @@ -157,12 +167,14 @@ runtime-benchmarks = [ "pallet-identity/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", "pallet-indices/runtime-benchmarks", + "pallet-mmr/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-society/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-tips/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", diff --git a/bin/node/runtime/build.rs b/bin/node/runtime/build.rs index 4f111bc9930078d09f02a1e189e2edcd26c993e4..8a0b4d7a0c15745cbc743130b522ddf693d3822b 100644 --- a/bin/node/runtime/build.rs 
+++ b/bin/node/runtime/build.rs @@ -15,12 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../../utils/wasm-builder") .export_heap_base() .import_memory() .build() diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 16666997b3a5597b807296f1b285177216342577..d7910c2c63b8a81c498f764e912ea1a48ee70886 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -34,13 +34,15 @@ mod multiplier_tests { use crate::{ constants::{currency::*, time::*}, - TransactionPayment, MaximumBlockWeight, AvailableBlockRatio, Runtime, TargetBlockFullness, + TransactionPayment, Runtime, TargetBlockFullness, AdjustmentVariable, System, MinimumMultiplier, + RuntimeBlockWeights as BlockWeights, }; - use frame_support::weights::{Weight, WeightToFeePolynomial}; + use frame_support::weights::{Weight, WeightToFeePolynomial, DispatchClass}; - fn max() -> Weight { - AvailableBlockRatio::get() * MaximumBlockWeight::get() + fn max_normal() -> Weight { + BlockWeights::get().get(DispatchClass::Normal).max_total + .unwrap_or_else(|| BlockWeights::get().max_block) } fn min_multiplier() -> Multiplier { @@ -48,7 +50,7 @@ mod multiplier_tests { } fn target() -> Weight { - TargetBlockFullness::get() * max() + TargetBlockFullness::get() * max_normal() } // update based on runtime impl. @@ -69,7 +71,7 @@ mod multiplier_tests { let previous_float = previous_float.max(min_multiplier().into_inner() as f64 / accuracy); // maximum tx weight - let m = max() as f64; + let m = max_normal() as f64; // block weight always truncated to max weight let block_weight = (block_weight as f64).min(m); let v: f64 = AdjustmentVariable::get().to_fraction(); @@ -89,7 +91,7 @@ mod multiplier_tests { let mut t: sp_io::TestExternalities = frame_system::GenesisConfig::default().build_storage::().unwrap().into(); t.execute_with(|| { - System::set_block_limits(w, 0); + System::set_block_consumed_resources(w, 0); assertions() }); } @@ -102,8 +104,8 @@ mod multiplier_tests { (100, fm.clone()), (1000, fm.clone()), (target(), fm.clone()), - (max() / 2, fm.clone()), - (max(), fm.clone()), + (max_normal() / 2, fm.clone()), + (max_normal(), fm.clone()), ]; test_set.into_iter().for_each(|(w, fm)| { run_with_system_weight(w, || { @@ -164,7 +166,7 @@ mod multiplier_tests { #[test] fn min_change_per_day() { - run_with_system_weight(max(), || { + run_with_system_weight(max_normal(), || { let mut fm = Multiplier::one(); // See the example in the doc of `TargetedFeeAdjustment`. are at least 0.234, hence // `fm > 1.234`. @@ -182,7 +184,7 @@ mod multiplier_tests { // `cargo test congested_chain_simulation -- --nocapture` to get some insight. // almost full. The entire quota of normal transactions is taken. - let block_weight = AvailableBlockRatio::get() * max() - 100; + let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap() - 100; // Default substrate weight. let tx_weight = frame_support::weights::constants::ExtrinsicBaseWeight::get(); @@ -200,7 +202,7 @@ mod multiplier_tests { fm = next; iterations += 1; let fee = - ::WeightToFee::calc(&tx_weight); + ::WeightToFee::calc(&tx_weight); let adjusted_fee = fm.saturating_mul_acc_int(fee); println!( "iteration {}, new fm = {:?}. 
Fee at this point is: {} units / {} millicents, \ @@ -320,15 +322,19 @@ mod multiplier_tests { 10 * mb, 2147483647, 4294967295, - MaximumBlockWeight::get() / 2, - MaximumBlockWeight::get(), + BlockWeights::get().max_block / 2, + BlockWeights::get().max_block, Weight::max_value() / 2, Weight::max_value(), ].into_iter().for_each(|i| { run_with_system_weight(i, || { let next = runtime_multiplier_update(Multiplier::one()); let truth = truth_value_update(i, Multiplier::one()); - assert_eq_error_rate!(truth, next, Multiplier::from_inner(50_000_000)); + assert_eq_error_rate!( + truth, + next, + Multiplier::from_inner(50_000_000) + ); }); }); diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 3e08b2cf8a6f8408e80d0d3550dc8cca8c92047e..fb77fd2ebd405c7a738c27ab12e64115f5ea3e1a 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! The Substrate runtime. This can be compiled with ``#[no_std]`, ready for Wasm. +//! The Substrate runtime. This can be compiled with `#[no_std]`, ready for Wasm. #![cfg_attr(not(feature = "std"), no_std)] // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. @@ -28,14 +28,17 @@ use frame_support::{ construct_runtime, parameter_types, debug, RuntimeDebug, weights::{ Weight, IdentityFee, - constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, + constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, DispatchClass, }, traits::{ Currency, Imbalance, KeyOwnerProofSystem, OnUnbalanced, Randomness, LockIdentifier, U128CurrencyToVote, }, }; -use frame_system::{EnsureRoot, EnsureOneOf}; +use frame_system::{ + EnsureRoot, EnsureOneOf, + limits::{BlockWeights, BlockLength} +}; use frame_support::traits::InstanceFilter; use codec::{Encode, Decode}; use sp_core::{ @@ -54,7 +57,7 @@ use sp_runtime::curve::PiecewiseLinear; use sp_runtime::transaction_validity::{TransactionValidity, TransactionSource, TransactionPriority}; use sp_runtime::traits::{ self, BlakeTwo256, Block as BlockT, StaticLookup, SaturatedConversion, - ConvertInto, OpaqueKeys, NumberFor, Saturating, + ConvertInto, OpaqueKeys, NumberFor, }; use sp_version::RuntimeVersion; #[cfg(any(feature = "std", test))] @@ -91,11 +94,11 @@ use sp_runtime::generic::Era; #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] -/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. pub fn wasm_binary_unwrap() -> &'static [u8] { WASM_BINARY.expect("Development wasm binary is not available. This means the client is \ - built with `BUILD_DUMMY_WASM_BINARY` flag and it is only usable for \ + built with `SKIP_WASM_BUILD` flag and it is only usable for \ production chains. Please rebuild with the flag disabled.") } @@ -141,23 +144,47 @@ impl OnUnbalanced for DealWithFees { } } -const AVERAGE_ON_INITIALIZE_WEIGHT: Perbill = Perbill::from_percent(10); +/// We assume that ~10% of the block weight is consumed by `on_initalize` handlers. +/// This is used to limit the maximal weight of a single extrinsic. +const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); +/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used +/// by Operational extrinsics. 
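The new doc comments above spell out the dispatch-class budget: roughly 10% of block weight assumed for `on_initialize`, 75% of the block available to `Normal` extrinsics, and a 2-second compute target. A small, self-contained sanity check of those numbers, assuming `WEIGHT_PER_SECOND` is 10^12 (its value in `frame_support` at this point):

```rust
// Quick sanity check of the budget described above and configured just below.
fn main() {
    // Assumption: WEIGHT_PER_SECOND = 10^12 in frame_support at this time.
    const WEIGHT_PER_SECOND: u64 = 1_000_000_000_000;
    const MAXIMUM_BLOCK_WEIGHT: u64 = 2 * WEIGHT_PER_SECOND; // 2 s of compute per block
    let normal_cap = MAXIMUM_BLOCK_WEIGHT / 100 * 75;        // 75% for Normal extrinsics
    let operational_reserved = MAXIMUM_BLOCK_WEIGHT - normal_cap; // space kept for Operational

    assert_eq!(normal_cap, 1_500_000_000_000);
    assert_eq!(operational_reserved, 500_000_000_000);
    println!("normal = {}, reserved for operational = {}", normal_cap, operational_reserved);
}
```

Under the `BlockWeights` builder configured in the lines that follow, this means `Operational` extrinsics keep about half a second of weight in reserve even when the normal lane has filled the block.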
+const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +/// We allow for 2 seconds of compute with a 6 second average block time. +const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; + parameter_types! { pub const BlockHashCount: BlockNumber = 2400; - /// We allow for 2 seconds of compute with a 6 second average block time. - pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - /// Assume 10% of weight for average on_initialize calls. - pub MaximumExtrinsicWeight: Weight = AvailableBlockRatio::get().saturating_sub(AVERAGE_ON_INITIALIZE_WEIGHT) - * MaximumBlockWeight::get(); - pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; pub const Version: RuntimeVersion = VERSION; -} - -const_assert!(AvailableBlockRatio::get().deconstruct() >= AVERAGE_ON_INITIALIZE_WEIGHT.deconstruct()); - -impl frame_system::Trait for Runtime { + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have some extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); +} + +const_assert!(NORMAL_DISPATCH_RATIO.deconstruct() >= AVERAGE_ON_INITIALIZE_RATIO.deconstruct()); + +impl frame_system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; + type DbWeight = RocksDbWeight; type Origin = Origin; type Call = Call; type Index = Index; @@ -169,13 +196,6 @@ impl frame_system::Trait for Runtime { type Header = generic::Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = RocksDbWeight; - type BlockExecutionWeight = BlockExecutionWeight; - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumExtrinsicWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = Version; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; @@ -184,7 +204,7 @@ impl frame_system::Trait for Runtime { type SystemWeightInfo = frame_system::weights::SubstrateWeight; } -impl pallet_utility::Trait for Runtime { +impl pallet_utility::Config for Runtime { type Event = Event; type Call = Call; type WeightInfo = pallet_utility::weights::SubstrateWeight; @@ -198,7 +218,7 @@ parameter_types! 
{ pub const MaxSignatories: u16 = 100; } -impl pallet_multisig::Trait for Runtime { +impl pallet_multisig::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -261,7 +281,7 @@ impl InstanceFilter for ProxyType { } } -impl pallet_proxy::Trait for Runtime { +impl pallet_proxy::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -277,11 +297,12 @@ impl pallet_proxy::Trait for Runtime { } parameter_types! { - pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * MaximumBlockWeight::get(); + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * + RuntimeBlockWeights::get().max_block; pub const MaxScheduledPerBlock: u32 = 50; } -impl pallet_scheduler::Trait for Runtime { +impl pallet_scheduler::Config for Runtime { type Event = Event; type Origin = Origin; type PalletsOrigin = OriginCaller; @@ -297,7 +318,7 @@ parameter_types! { pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; } -impl pallet_babe::Trait for Runtime { +impl pallet_babe::Config for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; type EpochChangeTrigger = pallet_babe::ExternalTrigger; @@ -324,7 +345,7 @@ parameter_types! { pub const IndexDeposit: Balance = 1 * DOLLARS; } -impl pallet_indices::Trait for Runtime { +impl pallet_indices::Config for Runtime { type AccountIndex = AccountIndex; type Currency = Balances; type Deposit = IndexDeposit; @@ -339,7 +360,7 @@ parameter_types! { pub const MaxLocks: u32 = 50; } -impl pallet_balances::Trait for Runtime { +impl pallet_balances::Config for Runtime { type MaxLocks = MaxLocks; type Balance = Balance; type DustRemoval = (); @@ -356,7 +377,7 @@ parameter_types! { pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000_000u128); } -impl pallet_transaction_payment::Trait for Runtime { +impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; @@ -368,7 +389,7 @@ parameter_types! { pub const MinimumPeriod: Moment = SLOT_DURATION / 2; } -impl pallet_timestamp::Trait for Runtime { +impl pallet_timestamp::Config for Runtime { type Moment = Moment; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; @@ -379,7 +400,7 @@ parameter_types! { pub const UncleGenerations: BlockNumber = 5; } -impl pallet_authorship::Trait for Runtime { +impl pallet_authorship::Config for Runtime { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -399,9 +420,9 @@ parameter_types! { pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); } -impl pallet_session::Trait for Runtime { +impl pallet_session::Config for Runtime { type Event = Event; - type ValidatorId = ::AccountId; + type ValidatorId = ::AccountId; type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; type NextSessionRotation = Babe; @@ -412,7 +433,7 @@ impl pallet_session::Trait for Runtime { type WeightInfo = pallet_session::weights::SubstrateWeight; } -impl pallet_session::historical::Trait for Runtime { +impl pallet_session::historical::Config for Runtime { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -438,12 +459,13 @@ parameter_types! { pub const MaxIterations: u32 = 10; // 0.05%. The higher the value, the more strict solution acceptance becomes. 
pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000); - pub OffchainSolutionWeightLimit: Weight = MaximumExtrinsicWeight::get() - .saturating_sub(BlockExecutionWeight::get()) - .saturating_sub(ExtrinsicBaseWeight::get()); + pub OffchainSolutionWeightLimit: Weight = RuntimeBlockWeights::get() + .get(DispatchClass::Normal) + .max_extrinsic.expect("Normal extrinsics have a weight limit configured; qed") + .saturating_sub(BlockExecutionWeight::get()); } -impl pallet_staking::Trait for Runtime { +impl pallet_staking::Config for Runtime { type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = U128CurrencyToVote; @@ -489,7 +511,7 @@ parameter_types! { pub const MaxProposals: u32 = 100; } -impl pallet_democracy::Trait for Runtime { +impl pallet_democracy::Config for Runtime { type Proposal = Call; type Event = Event; type Currency = Balances; @@ -541,7 +563,7 @@ parameter_types! { } type CouncilCollective = pallet_collective::Instance1; -impl pallet_collective::Trait for Runtime { +impl pallet_collective::Config for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -564,7 +586,7 @@ parameter_types! { // Make sure that there are no more than `MaxMembers` members elected via elections-phragmen. const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get()); -impl pallet_elections_phragmen::Trait for Runtime { +impl pallet_elections_phragmen::Config for Runtime { type Event = Event; type ModuleId = ElectionsPhragmenModuleId; type Currency = Balances; @@ -591,7 +613,7 @@ parameter_types! { } type TechnicalCollective = pallet_collective::Instance2; -impl pallet_collective::Trait for Runtime { +impl pallet_collective::Config for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -607,7 +629,7 @@ type EnsureRootOrHalfCouncil = EnsureOneOf< EnsureRoot, pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective> >; -impl pallet_membership::Trait for Runtime { +impl pallet_membership::Config for Runtime { type Event = Event; type AddOrigin = EnsureRootOrHalfCouncil; type RemoveOrigin = EnsureRootOrHalfCouncil; @@ -636,7 +658,7 @@ parameter_types! 
{ pub const BountyValueMinimum: Balance = 5 * DOLLARS; } -impl pallet_treasury::Trait for Runtime { +impl pallet_treasury::Config for Runtime { type ModuleId = TreasuryModuleId; type Currency = Balances; type ApproveOrigin = EnsureOneOf< @@ -649,25 +671,38 @@ impl pallet_treasury::Trait for Runtime { EnsureRoot, pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective> >; - type Tippers = Elections; - type TipCountdown = TipCountdown; - type TipFindersFee = TipFindersFee; - type TipReportDepositBase = TipReportDepositBase; - type DataDepositPerByte = DataDepositPerByte; type Event = Event; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; + type BurnDestination = (); + type SpendFunds = Bounties; + type WeightInfo = pallet_treasury::weights::SubstrateWeight; +} + +impl pallet_bounties::Config for Runtime { + type Event = Event; type BountyDepositBase = BountyDepositBase; type BountyDepositPayoutDelay = BountyDepositPayoutDelay; type BountyUpdatePeriod = BountyUpdatePeriod; type BountyCuratorDeposit = BountyCuratorDeposit; type BountyValueMinimum = BountyValueMinimum; + type DataDepositPerByte = DataDepositPerByte; type MaximumReasonLength = MaximumReasonLength; - type BurnDestination = (); - type WeightInfo = pallet_treasury::weights::SubstrateWeight; + type WeightInfo = pallet_bounties::weights::SubstrateWeight; +} + +impl pallet_tips::Config for Runtime { + type Event = Event; + type DataDepositPerByte = DataDepositPerByte; + type MaximumReasonLength = MaximumReasonLength; + type Tippers = Elections; + type TipCountdown = TipCountdown; + type TipFindersFee = TipFindersFee; + type TipReportDepositBase = TipReportDepositBase; + type WeightInfo = pallet_tips::weights::SubstrateWeight; } parameter_types! { @@ -675,29 +710,31 @@ parameter_types! 
{ pub const RentByteFee: Balance = 4 * MILLICENTS; pub const RentDepositOffset: Balance = 1000 * MILLICENTS; pub const SurchargeReward: Balance = 150 * MILLICENTS; + pub const SignedClaimHandicap: u32 = 2; + pub const MaxDepth: u32 = 32; + pub const StorageSizeOffset: u32 = 8; + pub const MaxValueSize: u32 = 16 * 1024; } -impl pallet_contracts::Trait for Runtime { +impl pallet_contracts::Config for Runtime { type Time = Timestamp; type Randomness = RandomnessCollectiveFlip; type Currency = Balances; type Event = Event; - type DetermineContractAddress = pallet_contracts::SimpleAddressDeterminer; - type TrieIdGenerator = pallet_contracts::TrieIdFromParentCounter; type RentPayment = (); - type SignedClaimHandicap = pallet_contracts::DefaultSignedClaimHandicap; + type SignedClaimHandicap = SignedClaimHandicap; type TombstoneDeposit = TombstoneDeposit; - type StorageSizeOffset = pallet_contracts::DefaultStorageSizeOffset; + type StorageSizeOffset = StorageSizeOffset; type RentByteFee = RentByteFee; type RentDepositOffset = RentDepositOffset; type SurchargeReward = SurchargeReward; - type MaxDepth = pallet_contracts::DefaultMaxDepth; - type MaxValueSize = pallet_contracts::DefaultMaxValueSize; + type MaxDepth = MaxDepth; + type MaxValueSize = MaxValueSize; type WeightPrice = pallet_transaction_payment::Module; type WeightInfo = pallet_contracts::weights::SubstrateWeight; } -impl pallet_sudo::Trait for Runtime { +impl pallet_sudo::Config for Runtime { type Event = Event; type Call = Call; } @@ -767,7 +804,7 @@ impl frame_system::offchain::SendTransactionTypes for Runtime where type OverarchingCall = Call; } -impl pallet_im_online::Trait for Runtime { +impl pallet_im_online::Config for Runtime { type AuthorityId = ImOnlineId; type Event = Event; type SessionDuration = SessionDuration; @@ -777,19 +814,20 @@ impl pallet_im_online::Trait for Runtime { } parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * + RuntimeBlockWeights::get().max_block; } -impl pallet_offences::Trait for Runtime { +impl pallet_offences::Config for Runtime { type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; } -impl pallet_authority_discovery::Trait for Runtime {} +impl pallet_authority_discovery::Config for Runtime {} -impl pallet_grandpa::Trait for Runtime { +impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; @@ -818,7 +856,7 @@ parameter_types! { pub const MaxRegistrars: u32 = 20; } -impl pallet_identity::Trait for Runtime { +impl pallet_identity::Config for Runtime { type Event = Event; type Currency = Balances; type BasicDeposit = BasicDeposit; @@ -840,7 +878,7 @@ parameter_types! { pub const RecoveryDeposit: Balance = 5 * DOLLARS; } -impl pallet_recovery::Trait for Runtime { +impl pallet_recovery::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -861,7 +899,7 @@ parameter_types! { pub const SocietyModuleId: ModuleId = ModuleId(*b"py/socie"); } -impl pallet_society::Trait for Runtime { +impl pallet_society::Config for Runtime { type Event = Event; type ModuleId = SocietyModuleId; type Currency = Balances; @@ -882,7 +920,7 @@ parameter_types! 
{ pub const MinVestedTransfer: Balance = 100 * DOLLARS; } -impl pallet_vesting::Trait for Runtime { +impl pallet_vesting::Config for Runtime { type Event = Event; type Currency = Balances; type BlockNumberToBalance = ConvertInto; @@ -890,6 +928,31 @@ impl pallet_vesting::Trait for Runtime { type WeightInfo = pallet_vesting::weights::SubstrateWeight; } +impl pallet_mmr::Config for Runtime { + const INDEXING_PREFIX: &'static [u8] = b"mmr"; + type Hashing = ::Hashing; + type Hash = ::Hash; + type LeafData = frame_system::Module; + type OnNewRoot = (); + type WeightInfo = (); +} + +parameter_types! { + pub const AssetDepositBase: Balance = 100 * DOLLARS; + pub const AssetDepositPerZombie: Balance = 1 * DOLLARS; +} + +impl pallet_assets::Config for Runtime { + type Event = Event; + type Balance = u64; + type AssetId = u32; + type Currency = Balances; + type ForceOrigin = EnsureRoot; + type AssetDepositBase = AssetDepositBase; + type AssetDepositPerZombie = AssetDepositPerZombie; + type WeightInfo = pallet_assets::weights::SubstrateWeight; +} + construct_runtime!( pub enum Runtime where Block = Block, @@ -927,6 +990,10 @@ construct_runtime!( Scheduler: pallet_scheduler::{Module, Call, Storage, Event}, Proxy: pallet_proxy::{Module, Call, Storage, Event}, Multisig: pallet_multisig::{Module, Call, Storage, Event}, + Bounties: pallet_bounties::{Module, Call, Storage, Event}, + Tips: pallet_tips::{Module, Call, Storage, Event}, + Assets: pallet_assets::{Module, Call, Storage, Event}, + Mmr: pallet_mmr::{Module, Storage}, } ); @@ -1171,9 +1238,9 @@ impl_runtime_apis! { use pallet_offences_benchmarking::Module as OffencesBench; use frame_system_benchmarking::Module as SystemBench; - impl pallet_session_benchmarking::Trait for Runtime {} - impl pallet_offences_benchmarking::Trait for Runtime {} - impl frame_system_benchmarking::Trait for Runtime {} + impl pallet_session_benchmarking::Config for Runtime {} + impl pallet_offences_benchmarking::Config for Runtime {} + impl frame_system_benchmarking::Config for Runtime {} let whitelist: Vec = vec![ // Block Number @@ -1193,8 +1260,10 @@ impl_runtime_apis! { let mut batches = Vec::::new(); let params = (&config, &whitelist); + add_benchmark!(params, batches, pallet_assets, Assets); add_benchmark!(params, batches, pallet_babe, Babe); add_benchmark!(params, batches, pallet_balances, Balances); + add_benchmark!(params, batches, pallet_bounties, Bounties); add_benchmark!(params, batches, pallet_collective, Council); add_benchmark!(params, batches, pallet_contracts, Contracts); add_benchmark!(params, batches, pallet_democracy, Democracy); @@ -1203,6 +1272,7 @@ impl_runtime_apis! { add_benchmark!(params, batches, pallet_identity, Identity); add_benchmark!(params, batches, pallet_im_online, ImOnline); add_benchmark!(params, batches, pallet_indices, Indices); + add_benchmark!(params, batches, pallet_mmr, Mmr); add_benchmark!(params, batches, pallet_multisig, Multisig); add_benchmark!(params, batches, pallet_offences, OffencesBench::); add_benchmark!(params, batches, pallet_proxy, Proxy); @@ -1211,6 +1281,7 @@ impl_runtime_apis! 
{ add_benchmark!(params, batches, pallet_staking, Staking); add_benchmark!(params, batches, frame_system, SystemBench::); add_benchmark!(params, batches, pallet_timestamp, Timestamp); + add_benchmark!(params, batches, pallet_tips, Tips); add_benchmark!(params, batches, pallet_treasury, Treasury); add_benchmark!(params, batches, pallet_utility, Utility); add_benchmark!(params, batches, pallet_vesting, Vesting); diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index a123da25301d073214c7d6ec48b362e445143096..35af52a2f36c1ad3170ff418fb4900c2f3a758f0 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -695,7 +695,6 @@ impl BenchContext { clear_justification_requests: false, needs_justification: false, bad_justification: false, - needs_finality_proof: false, is_new_best: true, } ) diff --git a/bin/utils/subkey/src/lib.rs b/bin/utils/subkey/src/lib.rs index 051628e84a193419221d0eccd04f797dc2b56a23..c38a48576524301f85f323b6986ece5ab052b3d0 100644 --- a/bin/utils/subkey/src/lib.rs +++ b/bin/utils/subkey/src/lib.rs @@ -63,7 +63,7 @@ pub enum Subkey { /// Run the subkey command, given the apropriate runtime. pub fn run() -> Result<(), Error> where - R: frame_system::Trait, + R: frame_system::Config, R::AccountId: Ss58Codec { match Subkey::from_args() { diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index d0fb5fc3ee0e2e82034f09f227df63fd1a635060..07036bfb414a26e45f7cfdd7d09120b3666c1842 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -23,9 +23,7 @@ fnv = "1.0.6" futures = "0.3.1" hash-db = { version = "0.15.2", default-features = false } sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -hex-literal = "0.3.1" sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } kvdb = "0.7.0" log = "0.4.8" parking_lot = "0.10.0" @@ -39,7 +37,6 @@ sp-api = { version = "2.0.0", path = "../../primitives/api" } sp-utils = { version = "2.0.0", path = "../../primitives/utils" } sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } sp-trie = { version = "2.0.0", path = "../../primitives/trie" } sp-storage = { version = "2.0.0", path = "../../primitives/storage" } sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } @@ -49,3 +46,4 @@ prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0. 
kvdb-memorydb = "0.7.0" sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } +thiserror = "1.0.21" diff --git a/client/api/src/light.rs b/client/api/src/light.rs index 144851dac0075f9c2a31f01225aaee5bb9504178..f9ba64544a8c0199072e82b010a33d72de6c2a79 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -312,13 +312,21 @@ pub mod tests { use sp_test_primitives::{Block, Header, Extrinsic}; use super::*; + #[derive(Debug, thiserror::Error)] + #[error("Not implemented on test node")] + struct MockError; + + impl Into for MockError { + fn into(self) -> ClientError { + ClientError::Application(Box::new(self)) + } + } + pub type OkCallFetcher = Mutex>; - fn not_implemented_in_tests() -> Ready> - where - E: std::convert::From<&'static str>, + fn not_implemented_in_tests() -> Ready> { - futures::future::ready(Err("Not implemented on test node".into())) + futures::future::ready(Err(MockError.into())) } impl Fetcher for OkCallFetcher { diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 40b929fc8a0f4b944c799ad9bef58e9a2e85f305..8878becd7e02a34e3986185dc6d53588a1b48e6c 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -18,19 +18,17 @@ prost-build = "0.6.1" [dependencies] async-trait = "0.1" -bytes = "0.5.0" codec = { package = "parity-scale-codec", default-features = false, version = "1.3.4" } derive_more = "0.99.2" either = "1.5.3" futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.30.1", default-features = false, features = ["kad"] } +libp2p = { version = "0.32.2", default-features = false, features = ["kad"] } log = "0.4.8" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} prost = "0.6.1" rand = "0.7.2" sc-client-api = { version = "2.0.0", path = "../api" } -sc-keystore = { version = "2.0.0", path = "../keystore" } sc-network = { version = "0.8.0", path = "../network" } serde_json = "1.0.41" sp-authority-discovery = { version = "2.0.0", path = "../../primitives/authority-discovery" } diff --git a/client/authority-discovery/src/error.rs b/client/authority-discovery/src/error.rs index 48bcdf33114b11d33e9331a8b7c221350b8d105b..82e4a6dd6f3fde8d25b54ab7304de77f9908b35c 100644 --- a/client/authority-discovery/src/error.rs +++ b/client/authority-discovery/src/error.rs @@ -31,7 +31,7 @@ pub enum Error { /// Failed to verify a dht payload with the given signature. VerifyingDhtPayload, /// Failed to hash the authority id to be used as a dht key. - HashingAuthorityId(libp2p::core::multiaddr::multihash::EncodeError), + HashingAuthorityId(libp2p::core::multiaddr::multihash::Error), /// Failed calling into the Substrate runtime. CallingRuntime(sp_blockchain::Error), /// Received a dht record with a key that does not match any in-flight awaited keys. diff --git a/client/authority-discovery/src/interval.rs b/client/authority-discovery/src/interval.rs new file mode 100644 index 0000000000000000000000000000000000000000..b3aa5b1c0f6785f68a9689e6f45a94e7e08b9d19 --- /dev/null +++ b/client/authority-discovery/src/interval.rs @@ -0,0 +1,62 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use futures::stream::Stream; +use futures::future::FutureExt; +use futures::ready; +use futures_timer::Delay; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::time::Duration; + +/// Exponentially increasing interval +/// +/// Doubles interval duration on each tick until the configured maximum is reached. +pub struct ExpIncInterval { + max: Duration, + next: Duration, + delay: Delay, +} + +impl ExpIncInterval { + /// Create a new [`ExpIncInterval`]. + pub fn new(start: Duration, max: Duration) -> Self { + let delay = Delay::new(start); + Self { + max, + next: start * 2, + delay, + } + } + + /// Fast forward the exponentially increasing interval to the configured maximum. + pub fn set_to_max(&mut self) { + self.next = self.max; + self.delay = Delay::new(self.next); + } +} + +impl Stream for ExpIncInterval { + type Item = (); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + ready!(self.delay.poll_unpin(cx)); + self.delay = Delay::new(self.next); + self.next = std::cmp::min(self.max, self.next * 2); + + Poll::Ready(Some(())) + } +} diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index 4ee57f31e04a59d311dd0cf3ee493f4c1b3e8c6c..41aa01e56bde20f36fbcde6eeee3156ac4addb6e 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -38,50 +38,41 @@ use sp_runtime::traits::Block as BlockT; use sp_api::ProvideRuntimeApi; mod error; +mod interval; mod service; +mod worker; + #[cfg(test)] mod tests; -mod worker; /// Configuration of [`Worker`]. pub struct WorkerConfig { - /// The interval in which the node will publish its own address on the DHT. + /// The maximum interval in which the node will publish its own address on the DHT. /// - /// By default this is set to 12 hours. - pub publish_interval: Duration, - /// The interval in which the node will query the DHT for new entries. + /// By default this is set to 1 hour. + pub max_publish_interval: Duration, + /// The maximum interval in which the node will query the DHT for new entries. /// /// By default this is set to 10 minutes. - pub query_interval: Duration, - /// The time the node will wait before triggering the first DHT query or publish. - /// - /// By default this is set to 30 seconds. - /// - /// This default is based on the rough boostrap time required by libp2p Kademlia. - pub query_start_delay: Duration, - /// The interval in which the worker will instruct the peerset to connect to a random subset - /// of discovered validators. - /// - /// By default this is set to 10 minutes. - pub priority_group_set_interval: Duration, - /// The time the worker will wait after each query interval tick to pass a subset of - /// the cached authority addresses down to the peerset. 
- /// - /// Be aware that the actual delay will be computed by [`Self::query_start_delay`] + - /// [`Self::priority_group_set_start_delay`] - /// - /// By default this is set to 5 minutes. - pub priority_group_set_offset: Duration, + pub max_query_interval: Duration, } impl Default for WorkerConfig { fn default() -> Self { Self { - publish_interval: Duration::from_secs(12 * 60 * 60), - query_interval: Duration::from_secs(10 * 60), - query_start_delay: Duration::from_secs(30), - priority_group_set_interval: Duration::from_secs(10 * 60), - priority_group_set_offset: Duration::from_secs(5 * 60), + // Kademlia's default time-to-live for Dht records is 36h, republishing records every + // 24h through libp2p-kad. Given that a node could restart at any point in time, one can + // not depend on the republishing process, thus publishing own external addresses should + // happen on an interval < 36h. + max_publish_interval: Duration::from_secs(1 * 60 * 60), + // External addresses of remote authorities can change at any given point in time. The + // interval on which to trigger new queries for the current and next authorities is a trade + // off between efficiency and performance. + // + // Querying 700 [`AuthorityId`]s takes ~8m on the Kusama DHT (16th Nov 2020) when + // comparing `authority_discovery_authority_addresses_requested_total` and + // `authority_discovery_dht_event_received`. + max_query_interval: Duration::from_secs(10 * 60), } } } diff --git a/client/authority-discovery/src/service.rs b/client/authority-discovery/src/service.rs index 7eabeb3daf52e3fb6e3c16bd965d357c178915c7..d23d2f3a480f7be7c97aa09dfde62d05d5c3da56 100644 --- a/client/authority-discovery/src/service.rs +++ b/client/authority-discovery/src/service.rs @@ -22,14 +22,14 @@ use futures::SinkExt; use sc_network::{Multiaddr, PeerId}; use sp_authority_discovery::AuthorityId; -/// Service to interact with the [`Worker`]. +/// Service to interact with the [`crate::Worker`]. #[derive(Clone)] pub struct Service { to_worker: mpsc::Sender, } -/// A [`Service`] allows to interact with a [`Worker`], e.g. by querying the -/// [`Worker`]'s local address cache for a given [`AuthorityId`]. +/// A [`Service`] allows to interact with a [`crate::Worker`], e.g. by querying the +/// [`crate::Worker`]'s local address cache for a given [`AuthorityId`]. impl Service { pub(crate) fn new(to_worker: mpsc::Sender) -> Self { Self { @@ -44,7 +44,7 @@ impl Service { /// [`crate::Worker`] failed. /// /// Note: [`Multiaddr`]s returned always include a [`PeerId`] via a - /// [`libp2p::core::multiaddr:Protocol::P2p`] component. Equality of + /// [`libp2p::core::multiaddr::Protocol::P2p`] component. Equality of /// [`PeerId`]s across [`Multiaddr`]s returned by a single call is not /// enforced today, given that there are still authorities out there /// publishing the addresses of their sentry nodes on the DHT. In the future diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index 42ae3a5213f0fc04ef78605e5b115cc71ca0abab..d886f24542d76885f15d5d854466af1a42bfc12d 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -14,22 +14,21 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
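The `Default` impl above swaps fixed timers for exponentially increasing ones, so the concrete retry schedule is easy to misread. Below is a standalone sketch (plain `std`, not part of this change; the 2-second start value comes from the worker code further down) that prints the wait times produced by the doubling rule, capped at the new 10-minute `max_query_interval`:

    use std::time::Duration;

    fn main() {
        let max = Duration::from_secs(10 * 60);   // max_query_interval default above
        let mut next = Duration::from_secs(2);    // start value used by the worker
        let mut total = Duration::from_secs(0);

        for tick in 1..=12 {
            total += next;
            println!("tick {:2}: waited {:?} (elapsed ~{:?})", tick, next, total);
            // Same update rule as `ExpIncInterval::poll_next`: double, capped at `max`.
            next = std::cmp::min(max, next * 2);
        }
    }

In other words, a freshly started node retries after 2s, 4s, 8s, and so on, while a long-running node settles at one DHT query every 10 minutes.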
-use crate::{error::{Error, Result}, ServicetoWorkerMsg}; +use crate::{error::{Error, Result}, interval::ExpIncInterval, ServicetoWorkerMsg}; use std::collections::{HashMap, HashSet}; use std::convert::TryInto; use std::marker::PhantomData; use std::sync::Arc; -use std::time::{Duration, Instant}; +use std::time::Duration; use futures::channel::mpsc; use futures::{FutureExt, Stream, StreamExt, stream::Fuse}; -use futures_timer::Delay; use addr_cache::AddrCache; use async_trait::async_trait; use codec::Decode; -use libp2p::{core::multiaddr, multihash::Multihash}; +use libp2p::{core::multiaddr, multihash::{Multihash, Hasher}}; use log::{debug, error, log_enabled}; use prometheus_endpoint::{Counter, CounterVec, Gauge, Opts, U64, register}; use prost::Message; @@ -54,8 +53,6 @@ mod schema { include!(concat!(env!("OUT_DIR"), "/authority_discovery.rs")); } #[cfg(test)] pub mod tests; -type Interval = Box + Unpin + Send + Sync>; - const LOG_TARGET: &'static str = "sub-authority-discovery"; /// Name of the Substrate peerset priority group for authorities discovered through the authority @@ -103,7 +100,7 @@ pub enum Role { /// /// 5. Allow querying of the collected addresses via the [`crate::Service`]. pub struct Worker { - /// Channel receiver for messages send by a [`Service`]. + /// Channel receiver for messages send by a [`crate::Service`]. from_service: Fuse>, client: Arc, @@ -113,12 +110,12 @@ pub struct Worker { dht_event_rx: DhtEventStream, /// Interval to be proactive, publishing own addresses. - publish_interval: Interval, + publish_interval: ExpIncInterval, /// Interval at which to request addresses of authorities, refilling the pending lookups queue. - query_interval: Interval, + query_interval: ExpIncInterval, /// Interval on which to set the peerset priority group to a new random /// set of addresses. - priority_group_set_interval: Interval, + priority_group_set_interval: ExpIncInterval, /// Queue of throttled lookups pending to be passed to the network. pending_lookups: Vec, @@ -153,31 +150,26 @@ where prometheus_registry: Option, config: crate::WorkerConfig, ) -> Self { - // Kademlia's default time-to-live for Dht records is 36h, republishing - // records every 24h through libp2p-kad. - // Given that a node could restart at any point in time, one can not depend on the - // republishing process, thus publishing own external addresses should happen on an interval - // < 36h. - let publish_interval = interval_at( - Instant::now() + config.query_start_delay, - config.publish_interval, + // When a node starts up publishing and querying might fail due to various reasons, for + // example due to being not yet fully bootstrapped on the DHT. Thus one should retry rather + // sooner than later. On the other hand, a long running node is likely well connected and + // thus timely retries are not needed. For this reasoning use an exponentially increasing + // interval for `publish_interval`, `query_interval` and `priority_group_set_interval` + // instead of a constant interval. + let publish_interval = ExpIncInterval::new( + Duration::from_secs(2), + config.max_publish_interval, ); - - // External addresses of remote authorities can change at any given point in time. The - // interval on which to trigger new queries for the current authorities is a trade off - // between efficiency and performance. 
- let query_interval_start = Instant::now() + config.query_start_delay; - let query_interval_duration = config.query_interval; - let query_interval = interval_at(query_interval_start, query_interval_duration); - - // Querying 500 [`AuthorityId`]s takes ~1m on the Kusama DHT (10th of August 2020) when - // comparing `authority_discovery_authority_addresses_requested_total` and - // `authority_discovery_dht_event_received`. With that in mind set the peerset priority - // group on the same interval as the [`query_interval`] above, - // just delayed by 5 minutes by default. - let priority_group_set_interval = interval_at( - query_interval_start + config.priority_group_set_offset, - config.priority_group_set_interval, + let query_interval = ExpIncInterval::new( + Duration::from_secs(2), + config.max_query_interval, + ); + let priority_group_set_interval = ExpIncInterval::new( + Duration::from_secs(2), + // Trade-off between node connection churn and connectivity. Using half of + // [`crate::WorkerConfig::max_query_interval`] to update priority group once at the + // beginning and once in the middle of each query interval. + config.max_query_interval / 2, ); let addr_cache = AddrCache::new(); @@ -413,7 +405,7 @@ where } if log_enabled!(log::Level::Debug) { - let hashes = v.iter().map(|(hash, _value)| hash.clone()); + let hashes: Vec<_> = v.iter().map(|(hash, _value)| hash.clone()).collect(); debug!( target: LOG_TARGET, "Value for hash '{:?}' found on Dht.", hashes, @@ -449,6 +441,11 @@ where } }, DhtEvent::ValuePut(hash) => { + // Fast forward the exponentially increasing interval to the configured maximum. In + // case this was the first successful address publishing there is no need for a + // timely retry. + self.publish_interval.set_to_max(); + if let Some(metrics) = &self.metrics { metrics.dht_event_received.with_label_values(&["value_put"]).inc(); } @@ -618,8 +615,8 @@ where } /// NetworkProvider provides [`Worker`] with all necessary hooks into the -/// underlying Substrate networking. Using this trait abstraction instead of [`NetworkService`] -/// directly is necessary to unit test [`Worker`]. +/// underlying Substrate networking. Using this trait abstraction instead of +/// [`sc_network::NetworkService`] directly is necessary to unit test [`Worker`]. #[async_trait] pub trait NetworkProvider: NetworkStateInfo { /// Modify a peerset priority group. @@ -661,16 +658,6 @@ fn hash_authority_id(id: &[u8]) -> libp2p::kad::record::Key { libp2p::kad::record::Key::new(&libp2p::multihash::Sha2_256::digest(id)) } -fn interval_at(start: Instant, duration: Duration) -> Interval { - let stream = futures::stream::unfold(start, move |next| { - let time_until_next = next.saturating_duration_since(Instant::now()); - - Delay::new(time_until_next).map(move |_| Some(((), next + duration))) - }); - - Box::new(stream) -} - /// Prometheus metrics for a [`Worker`]. 
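Since the worker above now only reads `max_publish_interval` and `max_query_interval`, the embedder-facing configuration shrinks to those two upper bounds. A minimal sketch of filling it in, assuming a dependency on `sc-authority-discovery` at this revision (the function name `discovery_config` is illustrative only):

    use std::time::Duration;
    use sc_authority_discovery::WorkerConfig;

    fn discovery_config() -> WorkerConfig {
        WorkerConfig {
            // Upper bound for republishing our own addresses (new default: 1 hour).
            max_publish_interval: Duration::from_secs(60 * 60),
            // Upper bound for querying the DHT for authority addresses (default: 10 minutes).
            max_query_interval: Duration::from_secs(10 * 60),
        }
    }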
#[derive(Clone)] pub(crate) struct Metrics { diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index a2cd3f33e92154668f1fc27dff7e13b286a74ce3..75fcaa840176656998ef413fe4315587d39b27a5 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -139,7 +139,7 @@ fn peer_id_from_multiaddr(addr: &Multiaddr) -> Option { mod tests { use super::*; - use libp2p::multihash; + use libp2p::multihash::{self, Multihash}; use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; use rand::Rng; @@ -163,7 +163,7 @@ mod tests { fn arbitrary(g: &mut G) -> Self { let seed: [u8; 32] = g.gen(); let peer_id = PeerId::from_multihash( - multihash::wrap(multihash::Code::Sha2_256, &seed) + Multihash::wrap(multihash::Code::Sha2_256.into(), &seed).unwrap() ).unwrap(); let multiaddr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse::() .unwrap() diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index 12adb8f23251406a1d16c5d07d39d34d72113419..fee861dfeb0c7da2c09646ab1eada17261401872 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -37,66 +37,6 @@ use substrate_test_runtime_client::runtime::Block; use super::*; -#[test] -fn interval_at_with_start_now() { - let start = Instant::now(); - - let mut interval = interval_at( - std::time::Instant::now(), - std::time::Duration::from_secs(10), - ); - - futures::executor::block_on(async { - interval.next().await; - }); - - assert!( - Instant::now().saturating_duration_since(start) < Duration::from_secs(1), - "Expected low resolution instant interval to fire within less than a second.", - ); -} - -#[test] -fn interval_at_is_queuing_ticks() { - let start = Instant::now(); - - let interval = interval_at(start, std::time::Duration::from_millis(100)); - - // Let's wait for 200ms, thus 3 elements should be queued up (1st at 0ms, 2nd at 100ms, 3rd - // at 200ms). - std::thread::sleep(Duration::from_millis(200)); - - futures::executor::block_on(async { - interval.take(3).collect::>().await; - }); - - // Make sure we did not wait for more than 300 ms, which would imply that `at_interval` is - // not queuing ticks. - assert!( - Instant::now().saturating_duration_since(start) < Duration::from_millis(300), - "Expect interval to /queue/ events when not polled for a while.", - ); -} - -#[test] -fn interval_at_with_initial_delay() { - let start = Instant::now(); - - let mut interval = interval_at( - std::time::Instant::now() + Duration::from_millis(100), - std::time::Duration::from_secs(10), - ); - - futures::executor::block_on(async { - interval.next().await; - }); - - assert!( - Instant::now().saturating_duration_since(start) > Duration::from_millis(100), - "Expected interval with initial delay not to fire right away.", - ); -} - #[derive(Clone)] pub(crate) struct TestApi { pub(crate) authorities: Vec, diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 2fe7ba72ec7b93c373f28a70a2075e92d4ca960d..065acbde2cc95ea6019afc59f977305315774374 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -44,7 +44,7 @@ use sc_proposer_metrics::MetricsLink as PrometheusMetrics; /// Default maximum block size in bytes used by [`Proposer`]. /// -/// Can be overwritten by [`ProposerFactory::set_maxium_block_size`]. 
+/// Can be overwritten by [`ProposerFactory::set_maximum_block_size`]. /// /// Be aware that there is also an upper packet size on what the networking code /// will accept. If the block doesn't fit in such a package, it can not be @@ -208,10 +208,7 @@ impl sp_consensus::Proposer for })); async move { - match rx.await { - Ok(x) => x, - Err(err) => Err(sp_blockchain::Error::Msg(err.to_string())) - } + rx.await? }.boxed() } } diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 9b0c4915082316126858aa8d98690950c518aedd..2cb66d4ccc40b7129dc8a67762701f8245e82d1b 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -71,4 +71,4 @@ mod basic_authorship; -pub use crate::basic_authorship::{ProposerFactory, Proposer}; +pub use crate::basic_authorship::{ProposerFactory, Proposer, DEFAULT_MAX_BLOCK_SIZE}; diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 8a38bb8478003d3e2fb4933d85ab4b30748df4b9..cc1431ea349bf807ca3ea46974cdd64df7e53084 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -212,7 +212,7 @@ where &state, changes_trie_state.as_ref(), parent_hash, - )?; + ).map_err(|e| sp_blockchain::Error::StorageChanges(e))?; Ok(BuiltBlock { block: ::new(header, self.extrinsics), diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index b0662c5eddf7aa9296374fb888a87eb979b7ac48..02d14d0d1941c1ac70a52460c9990dbaeb260e02 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -16,12 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] log = "0.4.11" atty = "0.2.13" regex = "1.4.2" -lazy_static = "1.4.0" -ansi_term = "0.12.1" tokio = { version = "0.2.21", features = [ "signal", "rt-core", "rt-threaded", "blocking" ] } futures = "0.3.4" fdlimit = "0.2.1" -libp2p = "0.30.1" +libp2p = "0.32.2" parity-scale-codec = "1.3.0" hex = "0.4.2" rand = "0.7.3" @@ -45,17 +43,18 @@ structopt = "0.3.8" sc-tracing = { version = "2.0.0", path = "../tracing" } chrono = "0.4.10" serde = "1.0.111" -tracing = "0.1.10" +tracing = "0.1.22" tracing-log = "0.1.1" -tracing-subscriber = "0.2.10" +tracing-subscriber = "0.2.15" sc-cli-proc-macro = { version = "2.0.0", path = "./proc-macro" } thiserror = "1.0.21" [target.'cfg(not(target_os = "unknown"))'.dependencies] -rpassword = "4.0.1" +rpassword = "5.0.0" [dev-dependencies] tempfile = "3.1.0" +ansi_term = "0.12.1" [features] wasmtime = [ diff --git a/client/cli/src/commands/insert.rs b/client/cli/src/commands/insert.rs index fc307e45e7ce427bf2c1705447e97038db38ee85..8b7fe98fc0b9d31a76d813f384717ea32a8bb8e7 100644 --- a/client/cli/src/commands/insert.rs +++ b/client/cli/src/commands/insert.rs @@ -65,7 +65,7 @@ impl InsertCmd { .ok_or_else(|| Error::MissingBasePath)?; let (keystore, public) = match self.keystore_params.keystore_config(base_path)? { - KeystoreConfig::Path { path, password } => { + (_, KeystoreConfig::Path { path, password }) => { let public = with_crypto_scheme!( self.crypto_scheme.scheme, to_vec(&suri, password.clone()) diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 019b760e5b4aefc394b01fb29e29ece2ee1956e8..48bad16afb677336772990404841ed1f9f08d1b9 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -75,7 +75,8 @@ pub struct RunCmd { /// Listen to all RPC interfaces. /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC proxy - /// server to filter out dangerous methods. 
More details: https://github.com/paritytech/substrate/wiki/Public-RPC. + /// server to filter out dangerous methods. More details: + /// . /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. #[structopt(long = "rpc-external")] pub rpc_external: bool, @@ -105,7 +106,7 @@ pub struct RunCmd { /// Listen to all Websocket interfaces. /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC proxy - /// server to filter out dangerous methods. More details: https://github.com/paritytech/substrate/wiki/Public-RPC. + /// server to filter out dangerous methods. More details: . /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. #[structopt(long = "ws-external")] pub ws_external: bool, @@ -142,7 +143,7 @@ pub struct RunCmd { /// /// A comma-separated list of origins (protocol://domain or special `null` /// value). Value of `all` will disable origin validation. Default is to - /// allow localhost and https://polkadot.js.org origins. When running in + /// allow localhost and origins. When running in /// --dev mode the default is to allow all origins. #[structopt(long = "rpc-cors", value_name = "ORIGINS", parse(try_from_str = parse_cors))] pub rpc_cors: Option, diff --git a/client/cli/src/commands/utils.rs b/client/cli/src/commands/utils.rs index 6e48d04e1328bda8ca234dd3e4bec2f2d1263f9b..25c7294fd1e0afa906338c85e3f6e8183da25e9a 100644 --- a/client/cli/src/commands/utils.rs +++ b/client/cli/src/commands/utils.rs @@ -57,7 +57,7 @@ pub fn read_uri(uri: Option<&String>) -> error::Result { /// 2. Try to construct the `Pair` while using `uri` as input for [`sp_core::Pair::from_string_with_seed`]. /// /// 3. Try to construct the `Pair::Public` while using `uri` as input for -/// [`sp_core::Pair::Public::from_string_with_version`]. +/// [`sp_core::crypto::Ss58Codec::from_string_with_version`]. pub fn print_from_uri( uri: &str, password: Option, diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index ab7a335c1ce646a0b14fafbe25f25c0c688d4b15..4e1ad19fc46f57569459965b26de22681c82e3d8 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -47,7 +47,7 @@ const RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT: u64 = 10_000; /// Default configuration values used by Substrate /// -/// These values will be used by [`CliConfiguritation`] to set +/// These values will be used by [`CliConfiguration`] to set /// default values for e.g. the listen port or the RPC port. pub trait DefaultConfigurationValues { /// The port Substrate should listen on for p2p connections. @@ -188,10 +188,10 @@ pub trait CliConfiguration: Sized { /// /// Bu default this is retrieved from `KeystoreParams` if it is available. Otherwise it uses /// `KeystoreConfig::InMemory`. - fn keystore_config(&self, base_path: &PathBuf) -> Result { + fn keystore_config(&self, base_path: &PathBuf) -> Result<(Option, KeystoreConfig)> { self.keystore_params() .map(|x| x.keystore_config(base_path)) - .unwrap_or(Ok(KeystoreConfig::InMemory)) + .unwrap_or_else(|| Ok((None, KeystoreConfig::InMemory))) } /// Get the database cache size. @@ -408,22 +408,18 @@ pub trait CliConfiguration: Sized { /// Get the tracing targets from the current object (if any) /// - /// By default this is retrieved from `ImportParams` if it is available. Otherwise its + /// By default this is retrieved from [`SharedParams`] if it is available. Otherwise its /// `None`. 
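The `keystore_config` change above means callers now receive a pair: the optional remote keystore URI from `--keystore-uri` plus the local keystore configuration. A hedged sketch of consuming that pair, assuming the `Result<(Option<String>, KeystoreConfig)>` signature shown in this diff (the surrounding function and the way the remote URI is reported are illustrative, not part of the change):

    use sc_service::config::KeystoreConfig;

    fn describe_keystore(
        cli: &impl sc_cli::CliConfiguration,
        base_path: &std::path::PathBuf,
    ) -> sc_cli::Result<String> {
        // First element: remote keystore URI (if `--keystore-uri` was given),
        // second element: the local keystore configuration.
        let (remote, local) = cli.keystore_config(base_path)?;
        Ok(match (remote, local) {
            (Some(uri), _) => format!("remote keystore at {}", uri),
            (None, KeystoreConfig::Path { path, .. }) =>
                format!("local keystore at {}", path.display()),
            (None, KeystoreConfig::InMemory) => "in-memory keystore".to_string(),
        })
    }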
fn tracing_targets(&self) -> Result> { - Ok(self.import_params() - .map(|x| x.tracing_targets()) - .unwrap_or_else(|| Default::default())) + Ok(self.shared_params().tracing_targets()) } /// Get the TracingReceiver value from the current object /// - /// By default this is retrieved from `ImportParams` if it is available. Otherwise its + /// By default this is retrieved from [`SharedParams`] if it is available. Otherwise its /// `TracingReceiver::default()`. fn tracing_receiver(&self) -> Result { - Ok(self.import_params() - .map(|x| x.tracing_receiver()) - .unwrap_or_default()) + Ok(self.shared_params().tracing_receiver()) } /// Get the node key from the current object @@ -475,6 +471,7 @@ pub trait CliConfiguration: Sized { let role = self.role(is_dev)?; let max_runtime_instances = self.max_runtime_instances()?.unwrap_or(8); let is_validator = role.is_network_authority(); + let (keystore_remote, keystore) = self.keystore_config(&config_dir)?; let unsafe_pruning = self .import_params() @@ -495,7 +492,8 @@ pub trait CliConfiguration: Sized { node_key, DCV::p2p_listen_port(), )?, - keystore: self.keystore_config(&config_dir)?, + keystore_remote, + keystore, database: self.database_config(&config_dir, database_cache_size, database)?, state_cache_size: self.state_cache_size()?, state_cache_child_ratio: self.state_cache_child_ratio()?, @@ -519,6 +517,7 @@ pub trait CliConfiguration: Sized { dev_key_seed: self.dev_key_seed(is_dev)?, tracing_targets: self.tracing_targets()?, tracing_receiver: self.tracing_receiver()?, + disable_log_reloading: self.is_log_filter_reloading_disabled()?, chain_spec, max_runtime_instances, announce_block: self.announce_block()?, @@ -538,6 +537,11 @@ pub trait CliConfiguration: Sized { Ok(self.shared_params().log_filters().join(",")) } + /// Is log reloading disabled (enabled by default) + fn is_log_filter_reloading_disabled(&self) -> Result { + Ok(self.shared_params().is_log_filter_reloading_disabled()) + } + /// Initialize substrate. This must be done only once per process. /// /// This method: @@ -549,12 +553,16 @@ pub trait CliConfiguration: Sized { let logger_pattern = self.log_filters()?; let tracing_receiver = self.tracing_receiver()?; let tracing_targets = self.tracing_targets()?; + let disable_log_reloading = self.is_log_filter_reloading_disabled()?; sp_panic_handler::set(&C::support_url(), &C::impl_version()); - if let Err(e) = init_logger(&logger_pattern, tracing_receiver, tracing_targets) { - log::warn!("💬 Problem initializing global logging framework: {:}", e) - } + init_logger( + &logger_pattern, + tracing_receiver, + tracing_targets, + disable_log_reloading, + )?; if let Some(new_limit) = fdlimit::raise_fd_limit() { if new_limit < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT { diff --git a/client/cli/src/error.rs b/client/cli/src/error.rs index 36c963f3e8c97e72e9d27486f0664db354a6b5ce..5190cae2c2ff89edc1b9a3916afe428c3969f51a 100644 --- a/client/cli/src/error.rs +++ b/client/cli/src/error.rs @@ -25,35 +25,32 @@ pub type Result = std::result::Result; /// Error type for the CLI. 
#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] pub enum Error { - /// Io error #[error(transparent)] Io(#[from] std::io::Error), - /// Cli error + #[error(transparent)] Cli(#[from] structopt::clap::Error), - /// Service error + #[error(transparent)] Service(#[from] sc_service::Error), - /// Client error + #[error(transparent)] Client(#[from] sp_blockchain::Error), - /// scale codec error + #[error(transparent)] Codec(#[from] parity_scale_codec::Error), - /// Input error + #[error("Invalid input: {0}")] Input(String), - /// Invalid listen multiaddress + #[error("Invalid listen multiaddress")] InvalidListenMultiaddress, - /// Application specific error chain sequence forwarder. - #[error(transparent)] - Application(#[from] Box), - /// URI error. + #[error("Invalid URI; expecting either a secret URI or a public URI.")] InvalidUri(crypto::PublicError), - /// Signature length mismatch. + #[error("Signature has an invalid length. Read {read} bytes, expected {expected} bytes")] SignatureInvalidLength { /// Amount of signature bytes read. @@ -61,28 +58,28 @@ pub enum Error { /// Expected number of signature bytes. expected: usize, }, - /// Missing base path argument. + #[error("The base path is missing, please provide one")] MissingBasePath, - /// Unknown key type specifier or missing key type specifier. + #[error("Unknown key type, must be a known 4-character sequence")] KeyTypeInvalid, - /// Signature verification failed. + #[error("Signature verification failed")] SignatureInvalid, - /// Storing a given key failed. + #[error("Key store operation failed")] KeyStoreOperation, - /// An issue with the underlying key storage was encountered. + #[error("Key storage issue encountered")] KeyStorage(#[from] sc_keystore::Error), - /// Bytes are not decodable when interpreted as hexadecimal string. - #[error("Invalid hex base data")] + + #[error("Invalid hexadecimal string data")] HexDataConversion(#[from] hex::FromHexError), - /// Shortcut type to specify types on the fly, discouraged. - #[deprecated = "Use `Forwarded` with an error type instead."] - #[error("Other: {0}")] - Other(String), + + /// Application specific error chain sequence forwarder. 
+ #[error(transparent)] + Application(#[from] Box), } impl std::convert::From<&str> for Error { @@ -93,7 +90,7 @@ impl std::convert::From<&str> for Error { impl std::convert::From for Error { fn from(s: String) -> Error { - Error::Input(s.to_string()) + Error::Input(s) } } diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index b543f80a9d3b3b899191e9937d647d25222e86f1..80882924bd3ad2c67a2e16aba571c5a58bc46640 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -27,7 +27,6 @@ pub mod arg_enums; mod commands; mod config; mod error; -mod logging; mod params; mod runner; @@ -48,8 +47,13 @@ use structopt::{ StructOpt, }; use tracing_subscriber::{ - filter::Directive, fmt::time::ChronoLocal, layer::SubscriberExt, FmtSubscriber, Layer, + fmt::time::ChronoLocal, + EnvFilter, + FmtSubscriber, + Layer, + layer::SubscriberExt, }; +pub use sc_tracing::logging; pub use logging::PREFIX_LOG_SPAN; #[doc(hidden)] @@ -243,12 +247,16 @@ pub fn init_logger( pattern: &str, tracing_receiver: sc_tracing::TracingReceiver, profiling_targets: Option, + disable_log_reloading: bool, ) -> std::result::Result<(), String> { - fn parse_directives(dirs: impl AsRef) -> Vec { - dirs.as_ref() - .split(',') - .filter_map(|s| s.parse().ok()) - .collect() + use sc_tracing::parse_default_directive; + + // Accept all valid directives and print invalid ones + fn parse_user_directives(mut env_filter: EnvFilter, dirs: &str) -> std::result::Result { + for dir in dirs.split(',') { + env_filter = env_filter.add_directive(parse_default_directive(&dir)?); + } + Ok(env_filter) } if let Err(e) = tracing_log::LogTracer::init() { @@ -257,33 +265,35 @@ pub fn init_logger( )) } - let mut env_filter = tracing_subscriber::EnvFilter::default() + // Initialize filter - ensure to use `parse_default_directive` for any defaults to persist + // after log filter reloading by RPC + let mut env_filter = EnvFilter::default() + // Enable info + .add_directive(parse_default_directive("info") + .expect("provided directive is valid")) // Disable info logging by default for some modules. - .add_directive("ws=off".parse().expect("provided directive is valid")) - .add_directive("yamux=off".parse().expect("provided directive is valid")) - .add_directive("cranelift_codegen=off".parse().expect("provided directive is valid")) + .add_directive(parse_default_directive("ws=off") + .expect("provided directive is valid")) + .add_directive(parse_default_directive("yamux=off") + .expect("provided directive is valid")) + .add_directive(parse_default_directive("cranelift_codegen=off") + .expect("provided directive is valid")) // Set warn logging by default for some modules. - .add_directive("cranelift_wasm=warn".parse().expect("provided directive is valid")) - .add_directive("hyper=warn".parse().expect("provided directive is valid")) - // Enable info for others. - .add_directive(tracing_subscriber::filter::LevelFilter::INFO.into()); + .add_directive(parse_default_directive("cranelift_wasm=warn") + .expect("provided directive is valid")) + .add_directive(parse_default_directive("hyper=warn") + .expect("provided directive is valid")); if let Ok(lvl) = std::env::var("RUST_LOG") { if lvl != "" { - // We're not sure if log or tracing is available at this moment, so silently ignore the - // parse error. 
- for directive in parse_directives(lvl) { - env_filter = env_filter.add_directive(directive); - } + env_filter = parse_user_directives(env_filter, &lvl)?; } } if pattern != "" { // We're not sure if log or tracing is available at this moment, so silently ignore the // parse error. - for directive in parse_directives(pattern) { - env_filter = env_filter.add_directive(directive); - } + env_filter = parse_user_directives(env_filter, pattern)?; } // If we're only logging `INFO` entries then we'll use a simplified logging format. @@ -293,19 +303,16 @@ pub fn init_logger( }; // Always log the special target `sc_tracing`, overrides global level. + // Required because profiling traces are emitted via `sc_tracing` // NOTE: this must be done after we check the `max_level_hint` otherwise // it is always raised to `TRACE`. env_filter = env_filter.add_directive( - "sc_tracing=trace" - .parse() - .expect("provided directive is valid"), + parse_default_directive("sc_tracing=trace").expect("provided directive is valid") ); // Make sure to include profiling targets in the filter if let Some(profiling_targets) = profiling_targets.clone() { - for directive in parse_directives(profiling_targets) { - env_filter = env_filter.add_directive(directive); - } + env_filter = parse_user_directives(env_filter, &profiling_targets)?; } let enable_color = atty::is(atty::Stream::Stderr); @@ -315,22 +322,42 @@ pub fn init_logger( "%Y-%m-%d %H:%M:%S%.3f".to_string() }); - let subscriber = FmtSubscriber::builder() + let subscriber_builder = FmtSubscriber::builder() .with_env_filter(env_filter) - .with_writer(std::io::stderr) + .with_writer(std::io::stderr as _) .event_format(logging::EventFormat { timer, + enable_color, display_target: !simple, display_level: !simple, display_thread_name: !simple, - enable_color, - }) - .finish() - .with(logging::NodeNameLayer); + }); + if disable_log_reloading { + let subscriber = subscriber_builder + .finish() + .with(logging::NodeNameLayer); + initialize_tracing(subscriber, tracing_receiver, profiling_targets) + } else { + let subscriber_builder = subscriber_builder.with_filter_reloading(); + let handle = subscriber_builder.reload_handle(); + sc_tracing::set_reload_handle(handle); + let subscriber = subscriber_builder + .finish() + .with(logging::NodeNameLayer); + initialize_tracing(subscriber, tracing_receiver, profiling_targets) + } +} +fn initialize_tracing( + subscriber: S, + tracing_receiver: sc_tracing::TracingReceiver, + profiling_targets: Option, +) -> std::result::Result<(), String> +where + S: tracing::Subscriber + Send + Sync + 'static, +{ if let Some(profiling_targets) = profiling_targets { let profiling = sc_tracing::ProfilingLayer::new(tracing_receiver, &profiling_targets); - if let Err(e) = tracing::subscriber::set_global_default(subscriber.with(profiling)) { return Err(format!( "Registering Substrate tracing subscriber failed: {:}!", e @@ -339,7 +366,7 @@ pub fn init_logger( } else { if let Err(e) = tracing::subscriber::set_global_default(subscriber) { return Err(format!( - "Registering Substrate tracing subscriber failed: {:}!", e + "Registering Substrate tracing subscriber failed: {:}!", e )) } } @@ -356,7 +383,7 @@ mod tests { #[test] fn test_logger_filters() { let test_pattern = "afg=debug,sync=trace,client=warn,telemetry,something-with-dash=error"; - init_logger(&test_pattern, Default::default(), Default::default()).unwrap(); + init_logger(&test_pattern, Default::default(), Default::default(), false).unwrap(); tracing::dispatcher::get_default(|dispatcher| { let 
test_filter = |target, level| { @@ -415,7 +442,7 @@ mod tests { fn log_something_with_dash_target_name() { if env::var("ENABLE_LOGGING").is_ok() { let test_pattern = "test-target=info"; - init_logger(&test_pattern, Default::default(), Default::default()).unwrap(); + init_logger(&test_pattern, Default::default(), Default::default(), false).unwrap(); log::info!(target: "test-target", "{}", EXPECTED_LOG_MESSAGE); } @@ -450,7 +477,8 @@ mod tests { #[test] fn prefix_in_log_lines_entrypoint() { if env::var("ENABLE_LOGGING").is_ok() { - init_logger("", Default::default(), Default::default()).unwrap(); + let test_pattern = "test-target=info"; + init_logger(&test_pattern, Default::default(), Default::default(), false).unwrap(); prefix_in_log_lines_process(); } } @@ -466,7 +494,7 @@ mod tests { #[test] fn do_not_write_with_colors_on_tty_entrypoint() { if env::var("ENABLE_LOGGING").is_ok() { - init_logger("", Default::default(), Default::default()).unwrap(); + init_logger("", Default::default(), Default::default(), false).unwrap(); log::info!("{}", ansi_term::Colour::Yellow.paint(EXPECTED_LOG_MESSAGE)); } } diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index 1efd4383432fb51a82674cd06b5944fcf219013f..376a72b8421f5446e4ccdfae606145478d329c73 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use crate::arg_enums::{ - ExecutionStrategy, TracingReceiver, WasmExecutionMethod, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, + ExecutionStrategy, WasmExecutionMethod, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, DEFAULT_EXECUTION_IMPORT_BLOCK, DEFAULT_EXECUTION_IMPORT_BLOCK_VALIDATOR, DEFAULT_EXECUTION_OFFCHAIN_WORKER, DEFAULT_EXECUTION_OTHER, DEFAULT_EXECUTION_SYNCING, }; @@ -73,32 +73,9 @@ pub struct ImportParams { default_value = "67108864" )] pub state_cache_size: usize, - - /// Comma separated list of targets for tracing. - #[structopt(long = "tracing-targets", value_name = "TARGETS")] - pub tracing_targets: Option, - - /// Receiver to process tracing messages. - #[structopt( - long = "tracing-receiver", - value_name = "RECEIVER", - possible_values = &TracingReceiver::variants(), - case_insensitive = true, - default_value = "Log" - )] - pub tracing_receiver: TracingReceiver, } impl ImportParams { - /// Receiver to process tracing messages. - pub fn tracing_receiver(&self) -> sc_service::TracingReceiver { - self.tracing_receiver.clone().into() - } - - /// Comma separated list of targets for tracing. - pub fn tracing_targets(&self) -> Option { - self.tracing_targets.clone() - } /// Specify the state cache size. 
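For reference, the reworked `init_logger` now takes the reload flag as a fourth argument, matching the updated test calls above. A minimal sketch of initializing logging directly, assuming dependencies on `sc-cli` and `sc-tracing` at this revision:

    fn main() -> Result<(), String> {
        // Arguments: log pattern, tracing receiver, optional profiling targets,
        // and whether to disable dynamic log-filter reloading.
        sc_cli::init_logger(
            "info,afg=debug",
            sc_tracing::TracingReceiver::Log,
            None,
            false, // keep `system_addLogFilter` / `system_resetLogFilter` reloading enabled
        )
    }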
pub fn state_cache_size(&self) -> usize { diff --git a/client/cli/src/params/keystore_params.rs b/client/cli/src/params/keystore_params.rs index 3c04d63144595680394e00fecd4a699ed446a304..f03fafeb965c0bb3489972816bd7adc18192b48e 100644 --- a/client/cli/src/params/keystore_params.rs +++ b/client/cli/src/params/keystore_params.rs @@ -22,8 +22,7 @@ use std::fs; use std::path::PathBuf; use structopt::StructOpt; use crate::error; -use sp_core::crypto::{SecretString, Zeroize}; -use std::str::FromStr; +use sp_core::crypto::SecretString; /// default sub directory for the key store const DEFAULT_KEYSTORE_CONFIG_PATH: &'static str = "keystore"; @@ -31,6 +30,9 @@ const DEFAULT_KEYSTORE_CONFIG_PATH: &'static str = "keystore"; /// Parameters of the keystore #[derive(Debug, StructOpt)] pub struct KeystoreParams { + /// Specify custom URIs to connect to for keystore-services + #[structopt(long = "keystore-uri")] + pub keystore_uri: Option, /// Specify custom keystore path. #[structopt(long = "keystore-path", value_name = "PATH", parse(from_os_str))] pub keystore_path: Option, @@ -68,25 +70,21 @@ pub fn secret_string_from_str(s: &str) -> std::result::Result Result { + /// returns a vector of remote-urls and the local Keystore configuration + pub fn keystore_config(&self, base_path: &PathBuf) -> Result<(Option, KeystoreConfig)> { + let password = if self.password_interactive { #[cfg(not(target_os = "unknown"))] { - let mut password = input_keystore_password()?; - let secret = std::str::FromStr::from_str(password.as_str()) - .map_err(|()| "Error reading password")?; - password.zeroize(); - Some(secret) + let password = input_keystore_password()?; + Some(SecretString::new(password)) } #[cfg(target_os = "unknown")] None } else if let Some(ref file) = self.password_filename { - let mut password = fs::read_to_string(file) + let password = fs::read_to_string(file) .map_err(|e| format!("{}", e))?; - let secret = std::str::FromStr::from_str(password.as_str()) - .map_err(|()| "Error reading password")?; - password.zeroize(); - Some(secret) + Some(SecretString::new(password)) } else { self.password.clone() }; @@ -96,7 +94,7 @@ impl KeystoreParams { .clone() .unwrap_or_else(|| base_path.join(DEFAULT_KEYSTORE_CONFIG_PATH)); - Ok(KeystoreConfig::Path { path, password }) + Ok((self.keystore_uri.clone(), KeystoreConfig::Path { path, password })) } /// helper method to fetch password from `KeyParams` or read from stdin @@ -104,10 +102,8 @@ impl KeystoreParams { let (password_interactive, password) = (self.password_interactive, self.password.clone()); let pass = if password_interactive { - let mut password = rpassword::read_password_from_tty(Some("Key password: "))?; - let pass = Some(FromStr::from_str(&password).map_err(|()| "Error reading password")?); - password.zeroize(); - pass + let password = rpassword::read_password_from_tty(Some("Key password: "))?; + Some(SecretString::new(password)) } else { password }; diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index 3276e5b7c4ba07b30f51f3ea28c5874767c4c460..52b1488ea9ccdf9383f2dd6222a478e9b3528fb4 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -19,6 +19,7 @@ use sc_service::config::BasePath; use std::path::PathBuf; use structopt::StructOpt; +use crate::arg_enums::TracingReceiver; /// Shared parameters used by all `CoreParams`. #[derive(Debug, StructOpt)] @@ -44,6 +45,28 @@ pub struct SharedParams { /// By default, all targets log `info`. 
The global log level can be set with -l. #[structopt(short = "l", long, value_name = "LOG_PATTERN")] pub log: Vec, + + /// Disable feature to dynamically update and reload the log filter. + /// + /// By default this feature is enabled, however it leads to a small performance decrease. + /// The `system_addLogFilter` and `system_resetLogFilter` RPCs will have no effect with this + /// option set. + #[structopt(long = "disable-log-reloading")] + pub disable_log_reloading: bool, + + /// Sets a custom profiling filter. Syntax is the same as for logging: = + #[structopt(long = "tracing-targets", value_name = "TARGETS")] + pub tracing_targets: Option, + + /// Receiver to process tracing messages. + #[structopt( + long = "tracing-receiver", + value_name = "RECEIVER", + possible_values = &TracingReceiver::variants(), + case_insensitive = true, + default_value = "Log" + )] + pub tracing_receiver: TracingReceiver, } impl SharedParams { @@ -75,4 +98,19 @@ impl SharedParams { pub fn log_filters(&self) -> &[String] { &self.log } + + /// Is log reloading disabled + pub fn is_log_filter_reloading_disabled(&self) -> bool { + self.disable_log_reloading + } + + /// Receiver to process tracing messages. + pub fn tracing_receiver(&self) -> sc_service::TracingReceiver { + self.tracing_receiver.clone().into() + } + + /// Comma separated list of targets for tracing. + pub fn tracing_targets(&self) -> Option { + self.tracing_targets.clone() + } } diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 97bfb217b9396f5d977bfa911c80a1dc61d4cfdc..246b39771277d3e54bc95db097e852a99447e68e 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -47,7 +47,7 @@ use sp_consensus::{ BlockOrigin, Error as ConsensusError, SelectChain, SlotData, BlockCheckParams, ImportResult }; use sp_consensus::import_queue::{ - Verifier, BasicQueue, DefaultImportQueue, BoxJustificationImport, BoxFinalityProofImport, + Verifier, BasicQueue, DefaultImportQueue, BoxJustificationImport, }; use sc_client_api::{backend::AuxStore, BlockOf}; use sp_blockchain::{ @@ -836,7 +836,6 @@ pub fn import_queue( slot_duration: SlotDuration, block_import: I, justification_import: Option>, - finality_proof_import: Option>, client: Arc, inherent_data_providers: InherentDataProviders, spawner: &S, @@ -868,7 +867,6 @@ pub fn import_queue( verifier, Box::new(block_import), justification_import, - finality_proof_import, spawner, registry, )) diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index c672440d114b507b78f1cb929b2b784f37a50bcb..3f2a583482afb5d7486f44e0f6c45b7e0f5d6111 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -79,9 +79,7 @@ use std::{ any::Any, borrow::Cow, convert::TryInto, }; use sp_consensus::{ImportResult, CanAuthorWith}; -use sp_consensus::import_queue::{ - BoxJustificationImport, BoxFinalityProofImport, -}; +use sp_consensus::import_queue::BoxJustificationImport; use sp_core::crypto::Public; use sp_application_crypto::AppKey; use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; @@ -1484,7 +1482,6 @@ pub fn import_queue( babe_link: BabeLink, block_import: Inner, justification_import: Option>, - finality_proof_import: Option>, client: Arc, select_chain: SelectChain, inherent_data_providers: InherentDataProviders, @@ -1516,7 +1513,6 @@ pub fn import_queue( verifier, Box::new(block_import), justification_import, - finality_proof_import, spawner, registry, )) diff --git a/client/consensus/babe/src/tests.rs 
b/client/consensus/babe/src/tests.rs index b31699d13e0c865d6a98805853f8e231ce1f372c..6e0536c85ced76ff1242361e2ab4b4205a990642 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -37,11 +37,11 @@ use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sp_consensus::{ NoNetwork as DummyOracle, Proposal, RecordProof, AlwaysCanAuthor, - import_queue::{BoxBlockImport, BoxJustificationImport, BoxFinalityProofImport}, + import_queue::{BoxBlockImport, BoxJustificationImport}, }; use sc_network_test::*; use sc_network_test::{Block as TestBlock, PeersClient}; -use sc_network::config::{BoxFinalityProofRequestBuilder, ProtocolConfig}; +use sc_network::config::ProtocolConfig; use sp_runtime::{generic::DigestItem, traits::{Block as BlockT, DigestFor}}; use sc_client_api::{BlockchainEvents, backend::TransactionFor}; use log::debug; @@ -272,8 +272,6 @@ impl TestNetFactory for BabeTestNet { -> ( BlockImportAdapter, Option>, - Option>, - Option>, Option, ) { @@ -295,8 +293,6 @@ impl TestNetFactory for BabeTestNet { ( BlockImportAdapter::new_full(block_import), None, - None, - None, Some(PeerData { link, inherent_data_providers, block_import: data_block_import }), ) } diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index d025d6aaf689f823456ca58d500cffcf16ed7760..9c4465f82fda1ef57e2e18be6ba88be05518531a 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -84,7 +84,6 @@ pub fn import_queue( ManualSealVerifier, block_import, None, - None, spawner, registry, ) @@ -164,10 +163,10 @@ pub async fn run_manual_seal( C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Error: std::fmt::Display, - >::Error: std::fmt::Display, + E::Proposer: Proposer>, CS: Stream::Hash>> + Unpin + 'static, SC: SelectChain + 'static, + TransactionFor: 'static, { while let Some(command) = commands_stream.next().await { match command { @@ -231,9 +230,9 @@ pub async fn run_instant_seal( C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Error: std::fmt::Display, - >::Error: std::fmt::Display, - SC: SelectChain + 'static + E::Proposer: Proposer>, + SC: SelectChain + 'static, + TransactionFor: 'static, { // instant-seal creates blocks as soon as transactions are imported // into the transaction pool. 
@@ -349,7 +348,6 @@ mod tests { clear_justification_requests: false, needs_justification: false, bad_justification: false, - needs_finality_proof: false, is_new_best: true, } } @@ -416,7 +414,6 @@ mod tests { clear_justification_requests: false, needs_justification: false, bad_justification: false, - needs_finality_proof: false, is_new_best: true, } } @@ -494,7 +491,6 @@ mod tests { clear_justification_requests: false, needs_justification: false, bad_justification: false, - needs_finality_proof: false, is_new_best: true } } diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index 58f017f2d41ad41396be3540828ae3c05fee5bba..a4afaa343e9052561204a60cadf731907c863c73 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -87,10 +87,10 @@ pub async fn seal_block( + Send + Sync + 'static, C: HeaderBackend + ProvideRuntimeApi, E: Environment, - >::Error: std::fmt::Display, - >::Error: std::fmt::Display, + E::Proposer: Proposer>, P: txpool::ChainApi, SC: SelectChain, + TransactionFor: 'static, { let future = async { if pool.validated_pool().status().ready == 0 && !create_empty { @@ -111,7 +111,7 @@ pub async fn seal_block( }; let proposer = env.init(&parent) - .map_err(|err| Error::StringError(format!("{}", err))).await?; + .map_err(|err| Error::StringError(format!("{:?}", err))).await?; let id = inherent_data_provider.create_inherent_data()?; let inherents_len = id.len(); @@ -122,7 +122,7 @@ pub async fn seal_block( }; let proposal = proposer.propose(id.clone(), digest, Duration::from_secs(MAX_PROPOSAL_DURATION), false.into()) - .map_err(|err| Error::StringError(format!("{}", err))).await?; + .map_err(|err| Error::StringError(format!("{:?}", err))).await?; if proposal.block.extrinsics().len() == inherents_len && !create_empty { return Err(Error::EmptyTransactionPool) @@ -133,6 +133,7 @@ pub async fn seal_block( params.body = Some(body); params.finalized = finalize; params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + params.storage_changes = Some(proposal.storage_changes); if let Some(digest_provider) = digest_provider { digest_provider.append_block_import(&parent, &mut params, &id)?; diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index b73b9aa91f802a0cae87e9b590f224100b5b8325..e353ed6358a00fc94f5069bbeff356ba93347cd6 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -56,7 +56,7 @@ use sp_consensus::{ BlockCheckParams, ImportResult, }; use sp_consensus::import_queue::{ - BoxBlockImport, BasicQueue, Verifier, BoxJustificationImport, BoxFinalityProofImport, + BoxBlockImport, BasicQueue, Verifier, BoxJustificationImport, }; use codec::{Encode, Decode}; use prometheus_endpoint::Registry; @@ -503,7 +503,6 @@ pub type PowImportQueue = BasicQueue; pub fn import_queue( block_import: BoxBlockImport, justification_import: Option>, - finality_proof_import: Option>, algorithm: Algorithm, inherent_data_providers: InherentDataProviders, spawner: &impl sp_core::traits::SpawnNamed, @@ -524,7 +523,6 @@ pub fn import_queue( verifier, block_import, justification_import, - finality_proof_import, spawner, registry, )) diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index d07ef49835b2bf6e6232df2048148ba53c554782..e8bd1f33631eaecfa4ddbfc65ea449dff8219690 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -31,7 +31,8 @@ sp-inherents = { version = "2.0.0", 
path = "../../../primitives/inherents" } futures = "0.3.4" futures-timer = "3.0.1" parking_lot = "0.10.0" -log = "0.4.8" +log = "0.4.11" +thiserror = "1.0.21" [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index d8601a7c12c68bea5805e6654da80f8fbb7c6095..571766bc44b1ae3c584f384fd7872ff4c4c535e4 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -20,7 +20,8 @@ //! time during which certain events can and/or must occur. This crate //! provides generic functionality for slots. -#![forbid(unsafe_code, missing_docs)] +#![forbid(unsafe_code)] +#![deny(missing_docs)] mod slots; mod aux_schema; @@ -470,6 +471,15 @@ pub enum CheckedHeader { Checked(H, S), } + + +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum Error where T: Debug { + #[error("Slot duration is invalid: {0:?}")] + SlotDurationInvalid(SlotDuration), +} + /// A slot duration. Create with `get_or_compute`. // The internal member should stay private here to maintain invariants of // `get_or_compute`. @@ -483,7 +493,7 @@ impl Deref for SlotDuration { } } -impl SlotData for SlotDuration { +impl SlotData for SlotDuration { /// Get the slot duration in milliseconds. fn slot_duration(&self) -> u64 where T: SlotData, @@ -494,7 +504,7 @@ impl SlotData for SlotDuration { const SLOT_KEY: &'static [u8] = T::SLOT_KEY; } -impl SlotDuration { +impl SlotDuration { /// Either fetch the slot duration from disk or compute it from the /// genesis state. /// @@ -532,10 +542,8 @@ impl SlotDuration { } }?; - if slot_duration.slot_duration() == 0 { - return Err(sp_blockchain::Error::Msg( - "Invalid value for slot_duration: the value must be greater than 0.".into(), - )) + if slot_duration.slot_duration() == 0u64 { + return Err(sp_blockchain::Error::Application(Box::new(Error::SlotDurationInvalid(slot_duration)))) } Ok(slot_duration) @@ -939,7 +947,7 @@ mod test { true, true, true, true, ]; - assert_eq!(backoff, expected); + assert_eq!(backoff.as_slice(), &expected[..]); } #[test] diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index f3c8f1aff9e14fc49613a7a5851dc675f8a5468f..5696922b4fbb3006689a01f602c65554e81f8511 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -350,13 +350,13 @@ impl StateBackend> for BenchmarkingState { } } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, ) { if let Some(ref state) = *self.state.borrow() { - state.for_keys_in_child_storage(child_info, f) + state.apply_to_child_keys_while(child_info, f) } } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 8196a750557a8484ce72fc906b81a9301b1fe808..e32e45a2f314aef92ba3615c4ec615d9156cf13e 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -49,6 +49,9 @@ use std::sync::Arc; use std::path::{Path, PathBuf}; use std::io; use std::collections::{HashMap, HashSet}; +use parking_lot::{Mutex, RwLock}; +use linked_hash_map::LinkedHashMap; +use log::{trace, debug, warn}; use sc_client_api::{ UsageInfo, MemoryInfo, IoInfo, MemorySize, @@ -63,7 +66,6 @@ use codec::{Decode, Encode}; use hash_db::Prefix; use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; use sp_database::Transaction; -use parking_lot::RwLock; use sp_core::ChangesTrieConfiguration; use sp_core::offchain::storage::{OffchainOverlayedChange, OffchainOverlayedChanges}; use sp_core::storage::{well_known_keys, 
ChildInfo}; @@ -83,7 +85,6 @@ use sc_state_db::StateDb; use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache}; use crate::storage_cache::{CachingState, SyncingCachingState, SharedCache, new_shared_cache}; use crate::stats::StateUsageStats; -use log::{trace, debug, warn}; // Re-export the Database trait so that one can pass an implementation of it. pub use sp_database::Database; @@ -93,6 +94,7 @@ pub use sc_state_db::PruningMode; pub use bench::BenchmarkingState; const MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR: u32 = 32768; +const CACHE_HEADERS: usize = 8; /// Default value for storage cache child ratio. const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); @@ -193,12 +195,12 @@ impl StateBackend> for RefTrackingState { self.state.for_key_values_with_prefix(prefix, f) } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, ) { - self.state.for_keys_in_child_storage(child_info, f) + self.state.apply_to_child_keys_while(child_info, f) } fn for_child_keys_with_prefix( @@ -352,12 +354,24 @@ impl<'a> sc_state_db::MetaDb for StateMetaDb<'a> { } } +fn cache_header( + cache: &mut LinkedHashMap>, + hash: Hash, + header: Option
, +) { + cache.insert(hash, header); + while cache.len() > CACHE_HEADERS { + cache.pop_front(); + } +} + /// Block database pub struct BlockchainDb { db: Arc>, meta: Arc, Block::Hash>>>, leaves: RwLock>>, header_metadata_cache: Arc>, + header_cache: Mutex>>, } impl BlockchainDb { @@ -369,6 +383,7 @@ impl BlockchainDb { leaves: RwLock::new(leaves), meta: Arc::new(RwLock::new(meta)), header_metadata_cache: Arc::new(HeaderMetadataCache::default()), + header_cache: Default::default(), }) } @@ -407,7 +422,20 @@ impl BlockchainDb { impl sc_client_api::blockchain::HeaderBackend for BlockchainDb { fn header(&self, id: BlockId) -> ClientResult> { - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) + match &id { + BlockId::Hash(h) => { + let mut cache = self.header_cache.lock(); + if let Some(result) = cache.get_refresh(h) { + return Ok(result.clone()); + } + let header = utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?; + cache_header(&mut cache, h.clone(), header.clone()); + Ok(header) + } + BlockId::Number(_) => { + utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) + } + } } fn info(&self) -> sc_client_api::blockchain::Info { @@ -424,12 +452,7 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha fn status(&self, id: BlockId) -> ClientResult { let exists = match id { - BlockId::Hash(_) => read_db( - &*self.db, - columns::KEY_LOOKUP, - columns::HEADER, - id - )?.is_some(), + BlockId::Hash(_) => self.header(id)?.is_some(), BlockId::Number(n) => n <= self.meta.read().best_number, }; match exists { @@ -868,9 +891,7 @@ impl Backend { let is_archive_pruning = config.pruning.is_archive(); let blockchain = BlockchainDb::new(db.clone())?; let meta = blockchain.meta.clone(); - let map_e = |e: sc_state_db::Error| sp_blockchain::Error::from( - format!("State database error: {:?}", e) - ); + let map_e = |e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e); let state_db: StateDb<_, _> = StateDb::new( config.pruning.clone(), !config.source.supports_ref_counting(), @@ -1059,7 +1080,7 @@ impl Backend { trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); let commit = self.storage.state_db.canonicalize_block(&hash) - .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; + .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e))?; apply_state_commit(transaction, commit); }; @@ -1117,12 +1138,6 @@ impl Backend { hash, )?; - let header_metadata = CachedHeaderMetadata::from(&pending_block.header); - self.blockchain.insert_header_metadata( - header_metadata.hash, - header_metadata, - ); - transaction.set_from_vec(columns::HEADER, &lookup_key, pending_block.header.encode()); if let Some(body) = &pending_block.body { transaction.set_from_vec(columns::BODY, &lookup_key, body.encode()); @@ -1195,9 +1210,7 @@ impl Backend { number_u64, &pending_block.header.parent_hash(), changeset, - ).map_err(|e: sc_state_db::Error| - sp_blockchain::Error::from(format!("State database error: {:?}", e)) - )?; + ).map_err(|e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e))?; apply_state_commit(&mut transaction, commit); // Check if need to finalize. Genesis is always finalized instantly. 
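[Illustrative note, not part of the patch] The `client/db` hunks above add a small, bounded header cache (`LinkedHashMap` plus `cache_header`) and make `header()` consult it only for hash-keyed lookups. The sketch below shows that pattern in isolation; the `u64`/`String` key and value types and the `std::sync::Mutex` are toy stand-ins for the real hash/header types and `parking_lot::Mutex`, while the `linked-hash-map` crate is the one the diff itself pulls in.

```rust
use linked_hash_map::LinkedHashMap;

const CACHE_HEADERS: usize = 8;

// Toy stand-ins for block hash / header types.
type Hash = u64;
type Header = String;

fn cache_header(cache: &mut LinkedHashMap<Hash, Option<Header>>, hash: Hash, header: Option<Header>) {
    cache.insert(hash, header);
    // Evict the oldest entries once the bound is exceeded.
    while cache.len() > CACHE_HEADERS {
        cache.pop_front();
    }
}

struct BlockchainDb {
    header_cache: std::sync::Mutex<LinkedHashMap<Hash, Option<Header>>>,
    backing: std::collections::HashMap<Hash, Header>,
}

impl BlockchainDb {
    // Only hash-keyed lookups are cached: a hash identifies one header forever,
    // while number-to-header mappings can change when the best chain changes.
    fn header_by_hash(&self, hash: Hash) -> Option<Header> {
        let mut cache = self.header_cache.lock().unwrap();
        if let Some(cached) = cache.get_refresh(&hash) {
            return cached.clone();
        }
        let header = self.backing.get(&hash).cloned();
        cache_header(&mut cache, hash, header.clone());
        header
    }
}

fn main() {
    let mut backing = std::collections::HashMap::new();
    backing.insert(0xdead, "header".to_string());
    let db = BlockchainDb {
        header_cache: std::sync::Mutex::new(LinkedHashMap::new()),
        backing,
    };
    assert_eq!(db.header_by_hash(0xdead).as_deref(), Some("header")); // miss, then cached
    assert_eq!(db.header_by_hash(0xdead).as_deref(), Some("header")); // hit, refreshed to the back
    assert_eq!(db.header_by_hash(0xbeef), None); // negative results are cached too
}
```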
@@ -1271,7 +1284,7 @@ impl Backend { meta_updates.push((hash, number, pending_block.leaf_state.is_best(), finalized)); - Some((number, hash, enacted, retracted, displaced_leaf, is_best, cache)) + Some((pending_block.header, number, hash, enacted, retracted, displaced_leaf, is_best, cache)) } else { None }; @@ -1297,7 +1310,11 @@ impl Backend { self.storage.db.commit(transaction)?; + // Apply all in-memory state shanges. + // Code beyond this point can't fail. + if let Some(( + header, number, hash, enacted, @@ -1306,6 +1323,12 @@ impl Backend { is_best, mut cache, )) = imported { + let header_metadata = CachedHeaderMetadata::from(&header); + self.blockchain.insert_header_metadata( + header_metadata.hash, + header_metadata, + ); + cache_header(&mut self.blockchain.header_cache.lock(), hash, Some(header)); cache.sync_cache( &enacted, &retracted, @@ -1352,7 +1375,7 @@ impl Backend { transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); let commit = self.storage.state_db.canonicalize_block(&f_hash) - .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; + .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e))?; apply_state_commit(transaction, commit); if !f_num.is_zero() { diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 0b4b6d4f88ef507bb4da1c7faa5725bcd2550b20..292d3c5162601af1ee02a0463406e62895be4fd9 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -584,12 +584,12 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.exists_child_storage(child_info, key) } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, ) { - self.state.for_keys_in_child_storage(child_info, f) + self.state.apply_to_child_keys_while(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -766,12 +766,12 @@ impl>, B: BlockT> StateBackend> for Syncin self.caching_state().exists_child_storage(child_info, key) } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, ) { - self.caching_state().for_keys_in_child_storage(child_info, f) + self.caching_state().apply_to_child_keys_while(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index b88e8926be1410910a9e8212a6a80bb4e9e4a9c2..c5ce4b86e12f5a0e05f4da82bf7bf463fe845ff4 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -48,8 +48,8 @@ test-case = "0.3.3" sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } sc-tracing = { version = "2.0.0", path = "../tracing" } -tracing = "0.1.19" -tracing-subscriber = "0.2.10" +tracing = "0.1.22" +tracing-subscriber = "0.2.15" [features] default = [ "std" ] diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index 64ed23598f47c6a955ae6d9b084217896a7b3d98..8501144a9a989fc7fc141548ad2f7479512dd6f4 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -14,7 +14,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -log = "0.4.8" derive_more = "0.99.2" parity-wasm = "0.41.0" codec = { package = "parity-scale-codec", version = "1.3.4" } @@ -22,8 +21,8 @@ wasmi = "0.6.2" sp-core = { version = "2.0.0", path = "../../../primitives/core" } sp-allocator 
= { version = "2.0.0", path = "../../../primitives/allocator" } sp-wasm-interface = { version = "2.0.0", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0", path = "../../../primitives/runtime-interface" } sp-serializer = { version = "2.0.0", path = "../../../primitives/serializer" } +thiserror = "1.0.21" [features] default = [] diff --git a/client/executor/common/src/error.rs b/client/executor/common/src/error.rs index caed63c183e68cd8eb1f362b085b4a6554b1dcd4..df0eaf8cc26101c7f611bf2f1ee46d07fd1fe6ee 100644 --- a/client/executor/common/src/error.rs +++ b/client/executor/common/src/error.rs @@ -25,92 +25,89 @@ use wasmi; pub type Result = std::result::Result; /// Error type. -#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] pub enum Error { - /// Unserializable Data - InvalidData(sp_serializer::Error), - /// Trap occurred during execution - Trap(wasmi::Trap), - /// Wasmi loading/instantiating error - Wasmi(wasmi::Error), - /// Error in the API. Parameter is an error message. - #[from(ignore)] + #[error("Unserializable data encountered")] + InvalidData(#[from] sp_serializer::Error), + + #[error(transparent)] + Trap(#[from] wasmi::Trap), + + #[error(transparent)] + Wasmi(#[from] wasmi::Error), + + #[error("API Error: {0}")] ApiError(String), - /// Method is not found - #[display(fmt="Method not found: '{}'", _0)] - #[from(ignore)] + + #[error("Method not found: '{0}'")] MethodNotFound(String), - /// Code is invalid (expected single byte) - #[display(fmt="Invalid Code: {}", _0)] - #[from(ignore)] + + #[error("Invalid Code (expected single byte): '{0}'")] InvalidCode(String), - /// Could not get runtime version. - #[display(fmt="On-chain runtime does not specify version")] + + #[error("On-chain runtime does not specify version")] VersionInvalid, - /// Externalities have failed. - #[display(fmt="Externalities error")] + + #[error("Externalities error")] Externalities, - /// Invalid index. - #[display(fmt="Invalid index provided")] + + #[error("Invalid index provided")] InvalidIndex, - /// Invalid return type. - #[display(fmt="Invalid type returned (should be u64)")] + + #[error("Invalid type returned (should be u64)")] InvalidReturn, - /// Runtime failed. - #[display(fmt="Runtime error")] + + #[error("Runtime error")] Runtime, - /// Runtime panicked. - #[display(fmt="Runtime panicked: {}", _0)] - #[from(ignore)] + + #[error("Runtime panicked: {0}")] RuntimePanicked(String), - /// Invalid memory reference. - #[display(fmt="Invalid memory reference")] + + #[error("Invalid memory reference")] InvalidMemoryReference, - /// The runtime must provide a global named `__heap_base` of type i32 for specifying where the - /// allocator is allowed to place its data. - #[display(fmt="The runtime doesn't provide a global named `__heap_base`")] + + #[error("The runtime doesn't provide a global named `__heap_base` of type `i32`")] HeapBaseNotFoundOrInvalid, - /// The runtime WebAssembly module is not allowed to have the `start` function. - #[display(fmt="The runtime has the `start` function")] + + #[error("The runtime must not have the `start` function defined")] RuntimeHasStartFn, - /// Some other error occurred + + #[error("Other: {0}")] Other(String), - /// Some error occurred in the allocator - #[display(fmt="Error in allocator: {}", _0)] - Allocator(sp_allocator::Error), - /// Execution of a host function failed. 
- #[display(fmt="Host function {} execution failed with: {}", _0, _1)] + + #[error(transparent)] + Allocator(#[from] sp_allocator::Error), + + #[error("Host function {0} execution failed with: {1}")] FunctionExecution(String, String), - /// No table is present. - /// - /// Call was requested that requires table but none was present in the instance. - #[display(fmt="No table exported by wasm blob")] + + #[error("No table exported by wasm blob")] NoTable, - /// No table entry is present. - /// - /// Call was requested that requires specific entry in the table to be present. - #[display(fmt="No table entry with index {} in wasm blob exported table", _0)] - #[from(ignore)] + + #[error("No table entry with index {0} in wasm blob exported table")] NoTableEntryWithIndex(u32), - /// Table entry is not a function. - #[display(fmt="Table element with index {} is not a function in wasm blob exported table", _0)] - #[from(ignore)] + + #[error("Table element with index {0} is not a function in wasm blob exported table")] TableElementIsNotAFunction(u32), - /// Function in table is null and thus cannot be called. - #[display(fmt="Table entry with index {} in wasm blob is null", _0)] - #[from(ignore)] + + #[error("Table entry with index {0} in wasm blob is null")] FunctionRefIsNull(u32), -} -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::InvalidData(ref err) => Some(err), - Error::Trap(ref err) => Some(err), - Error::Wasmi(ref err) => Some(err), - _ => None, - } - } + #[error(transparent)] + RuntimeConstruction(#[from] WasmError), + + #[error("Shared memory is not supported")] + SharedMemUnsupported, + + #[error("Imported globals are not supported yet")] + ImportedGlobalsUnsupported, + + #[error("initializer expression can have only up to 2 expressions in wasm 1.0")] + InitializerHasTooManyExpressions, + + #[error("Invalid initializer expression provided {0}")] + InvalidInitializerExpression(String), } impl wasmi::HostError for Error {} @@ -121,9 +118,9 @@ impl From<&'static str> for Error { } } -impl From for Error { - fn from(err: WasmError) -> Error { - Error::Other(err.to_string()) +impl From for Error { + fn from(err: String) -> Error { + Error::Other(err) } } @@ -151,3 +148,5 @@ pub enum WasmError { /// Other error happenend. Other(String), } + +impl std::error::Error for WasmError {} diff --git a/client/executor/common/src/lib.rs b/client/executor/common/src/lib.rs index 7f3864e6152fb2a00122bc6e6c2d2e9c2a4102e8..df839d4ab65232bc0552e18864fdc2ab16903c12 100644 --- a/client/executor/common/src/lib.rs +++ b/client/executor/common/src/lib.rs @@ -17,6 +17,7 @@ //! A set of common definitions that are needed for defining execution engines. 
#![warn(missing_docs)] +#![deny(unused_crate_dependencies)] pub mod error; pub mod sandbox; diff --git a/client/executor/common/src/util.rs b/client/executor/common/src/util.rs index 92a48e14018143944dae245abddb1db481ce4021..564f9dadcbec691e1344872f330a30bd3c543740 100644 --- a/client/executor/common/src/util.rs +++ b/client/executor/common/src/util.rs @@ -87,15 +87,12 @@ impl DataSegmentsSnapshot { let init_expr = match segment.offset() { Some(offset) => offset.code(), // Return if the segment is passive - None => return Err(Error::from("Shared memory is not supported".to_string())), + None => return Err(Error::SharedMemUnsupported), }; // [op, End] if init_expr.len() != 2 { - return Err(Error::from( - "initializer expression can have only up to 2 expressions in wasm 1.0" - .to_string(), - )); + return Err(Error::InitializerHasTooManyExpressions); } let offset = match &init_expr[0] { Instruction::I32Const(v) => *v as u32, @@ -106,15 +103,10 @@ impl DataSegmentsSnapshot { // At the moment of writing the Substrate Runtime Interface does not provide // any globals. There is nothing that prevents us from supporting this // if/when we gain those. - return Err(Error::from( - "Imported globals are not supported yet".to_string(), - )); + return Err(Error::ImportedGlobalsUnsupported); } insn => { - return Err(Error::from(format!( - "{:?} is not supported as initializer expression in wasm 1.0", - insn - ))) + return Err(Error::InvalidInitializerExpression(format!("{:?}", insn))) } }; diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index ba23e31febee5cd504364a8db11dbeec59029ee3..1a898b92ca9abdbb92935c86143d3a5e161ecf64 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -22,7 +22,7 @@ sp-std = { version = "2.0.0", default-features = false, path = "../../../primiti sp-tasks = { version = "2.0.0", default-features = false, path = "../../../primitives/tasks" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "3.0.0", path = "../../../utils/wasm-builder" } [features] default = [ "std" ] diff --git a/client/executor/runtime-test/build.rs b/client/executor/runtime-test/build.rs index bc07db900c31e50b8661934afa85c06bbae3b719..a83de21db7f0f387534d6feab29fbfc325474a90 100644 --- a/client/executor/runtime-test/build.rs +++ b/client/executor/runtime-test/build.rs @@ -14,13 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
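[Illustrative note, not part of the patch] The `util.rs` hunk above returns dedicated error variants (`SharedMemUnsupported`, `InvalidInitializerExpression`, ...) instead of `Error::from(String)`. The toy sketch below shows the caller-side benefit: code can match on a variant rather than inspect message text. The enum is a local stand-in and the "parser" only models the data-segment checks, it is not the real wasm handling.

```rust
// Toy error enum mirroring a subset of the variants introduced above.
#[derive(Debug, PartialEq)]
enum Error {
    SharedMemUnsupported,
    InvalidInitializerExpression(String),
}

// Stand-in for extracting a data-segment offset: `None` models a passive
// segment, anything non-numeric models an unsupported initializer expression.
fn segment_offset(init_expr: Option<&str>) -> Result<u32, Error> {
    let expr = init_expr.ok_or(Error::SharedMemUnsupported)?;
    expr.parse::<u32>()
        .map_err(|_| Error::InvalidInitializerExpression(expr.to_string()))
}

fn main() {
    // Callers branch on the variant instead of a string message, which is the
    // point of dropping the `Error::from(String)` conversions.
    assert_eq!(segment_offset(None), Err(Error::SharedMemUnsupported));
    assert_eq!(segment_offset(Some("16")), Ok(16));
    match segment_offset(Some("get_global 0")) {
        Err(Error::InvalidInitializerExpression(e)) => println!("unsupported initializer: {}", e),
        other => println!("unexpected: {:?}", other),
    }
}
```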
-use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { // regular build WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../../utils/wasm-builder") .export_heap_base() .import_memory() .build(); @@ -28,10 +27,9 @@ fn main() { // and building with tracing activated WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../../utils/wasm-builder") .export_heap_base() .import_memory() .set_file_name("wasm_binary_with_tracing.rs") - .append_to_rust_flags("--cfg feature=\\\"with-tracing\\\"") + .append_to_rust_flags(r#"--cfg feature="with-tracing""#) .build(); } diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index 404530c1c3ebf738c8a36895cf7473db25b26a1f..bfba4ef039395f87a0dc71c1c3f933526927fe12 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -4,8 +4,8 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] -/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. pub fn wasm_binary_unwrap() -> &'static [u8] { WASM_BINARY.expect("Development wasm binary is not available. Testing is only \ supported with the flag disabled.") @@ -261,6 +261,17 @@ sp_core::wasm_export_functions! { wasm_tracing::exit(span_id) } + fn test_nested_spans() { + sp_io::init_tracing(); + let span_id = wasm_tracing::enter_span(Default::default()); + { + sp_io::init_tracing(); + let span_id = wasm_tracing::enter_span(Default::default()); + wasm_tracing::exit(span_id); + } + wasm_tracing::exit(span_id); + } + fn returns_mutable_static() -> u64 { unsafe { MUTABLE_STATIC += 1; diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index c8b763a6b1936612a7ffe8d5aedeb715d214e570..0a00375145fbad9365532720ff68456ff308caea 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -475,8 +475,6 @@ fn offchain_index(wasm_method: WasmExecutionMethod) { #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn offchain_local_storage_should_work(wasm_method: WasmExecutionMethod) { - use sp_core::offchain::OffchainStorage; - let mut ext = TestExternalities::default(); let (offchain, state) = testing::TestOffchainExt::new(); ext.register_extension(OffchainExt::new(offchain)); @@ -489,7 +487,7 @@ fn offchain_local_storage_should_work(wasm_method: WasmExecutionMethod) { ).unwrap(), true.encode(), ); - assert_eq!(state.read().persistent_storage.get(b"", b"test"), Some(vec![])); + assert_eq!(state.read().persistent_storage.get(b"test"), Some(vec![])); } #[test_case(WasmExecutionMethod::Interpreted)] @@ -719,6 +717,15 @@ fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(span_datum.target, "default"); assert_eq!(span_datum.name, ""); assert_eq!(values.bool_values.get("wasm").unwrap(), &true); + + call_in_wasm( + "test_nested_spans", + Default::default(), + wasm_method, + &mut ext, + ).unwrap(); + let len = traces.lock().unwrap().len(); + assert_eq!(len, 2); } #[test_case(WasmExecutionMethod::Interpreted)] diff --git a/client/finality-grandpa/src/authorities.rs b/client/finality-grandpa/src/authorities.rs index 
2de169fc8285a14a95e9142b3f0df02e29918970..de14c7b3ba390ab64f687e7cf897130fd86d7c4b 100644 --- a/client/finality-grandpa/src/authorities.rs +++ b/client/finality-grandpa/src/authorities.rs @@ -758,9 +758,10 @@ mod tests { authorities.add_pending_change(change_d.clone(), &static_is_descendent_of(false)).unwrap(); authorities.add_pending_change(change_e.clone(), &static_is_descendent_of(false)).unwrap(); + // ordered by subtree depth assert_eq!( authorities.pending_changes().collect::>(), - vec![&change_b, &change_a, &change_c, &change_e, &change_d], + vec![&change_a, &change_c, &change_b, &change_e, &change_d], ); } @@ -798,7 +799,7 @@ mod tests { assert_eq!( authorities.pending_changes().collect::>(), - vec![&change_b, &change_a], + vec![&change_a, &change_b], ); // finalizing "hash_c" won't enact the change signaled at "hash_a" but it will prune out "hash_b" diff --git a/client/finality-grandpa/src/aux_schema.rs b/client/finality-grandpa/src/aux_schema.rs index 4ed96d058ac6b42941705c046984852fdea0b795..97041f4081a720a35f9de0d0479f84a6071fd69b 100644 --- a/client/finality-grandpa/src/aux_schema.rs +++ b/client/finality-grandpa/src/aux_schema.rs @@ -17,7 +17,6 @@ //! Schema for stuff in the aux-db. use std::fmt::Debug; -use std::sync::Arc; use parity_scale_codec::{Encode, Decode}; use sc_client_api::backend::AuxStore; use sp_blockchain::{Result as ClientResult, Error as ClientError}; @@ -28,7 +27,6 @@ use log::{info, warn}; use sp_finality_grandpa::{AuthorityList, SetId, RoundNumber}; use crate::authorities::{AuthoritySet, SharedAuthoritySet, PendingChange, DelayKind}; -use crate::consensus_changes::{SharedConsensusChanges, ConsensusChanges}; use crate::environment::{ CompletedRound, CompletedRounds, CurrentRounds, HasVoted, SharedVoterSetState, VoterSetState, }; @@ -38,7 +36,6 @@ const VERSION_KEY: &[u8] = b"grandpa_schema_version"; const SET_STATE_KEY: &[u8] = b"grandpa_completed_round"; const CONCLUDED_ROUNDS: &[u8] = b"grandpa_concluded_rounds"; const AUTHORITY_SET_KEY: &[u8] = b"grandpa_voters"; -const CONSENSUS_CHANGES_KEY: &[u8] = b"grandpa_consensus_changes"; const CURRENT_VERSION: u32 = 2; @@ -122,7 +119,6 @@ pub(crate) fn load_decode(backend: &B, key: &[u8]) -> Cl /// Persistent data kept between runs. pub(crate) struct PersistentData { pub(crate) authority_set: SharedAuthoritySet>, - pub(crate) consensus_changes: SharedConsensusChanges>, pub(crate) set_state: SharedVoterSetState, } @@ -272,8 +268,6 @@ pub(crate) fn load_persistent( G: FnOnce() -> ClientResult, { let version: Option = load_decode(backend, VERSION_KEY)?; - let consensus_changes = load_decode(backend, CONSENSUS_CHANGES_KEY)? - .unwrap_or_else(ConsensusChanges::>::empty); let make_genesis_round = move || RoundState::genesis((genesis_hash, genesis_number)); @@ -282,7 +276,6 @@ pub(crate) fn load_persistent( if let Some((new_set, set_state)) = migrate_from_version0::(backend, &make_genesis_round)? { return Ok(PersistentData { authority_set: new_set.into(), - consensus_changes: Arc::new(consensus_changes.into()), set_state: set_state.into(), }); } @@ -291,7 +284,6 @@ pub(crate) fn load_persistent( if let Some((new_set, set_state)) = migrate_from_version1::(backend, &make_genesis_round)? 
{ return Ok(PersistentData { authority_set: new_set.into(), - consensus_changes: Arc::new(consensus_changes.into()), set_state: set_state.into(), }); } @@ -321,7 +313,6 @@ pub(crate) fn load_persistent( return Ok(PersistentData { authority_set: set.into(), - consensus_changes: Arc::new(consensus_changes.into()), set_state: set_state.into(), }); } @@ -359,7 +350,6 @@ pub(crate) fn load_persistent( Ok(PersistentData { authority_set: genesis_set.into(), set_state: genesis_state.into(), - consensus_changes: Arc::new(consensus_changes.into()), }) } @@ -421,18 +411,6 @@ pub(crate) fn write_concluded_round( backend.insert_aux(&[(&key[..], round_data.encode().as_slice())], &[]) } -/// Update the consensus changes. -pub(crate) fn update_consensus_changes( - set: &ConsensusChanges, - write_aux: F -) -> R where - H: Encode + Clone, - N: Encode + Clone, - F: FnOnce(&[(&'static [u8], &[u8])]) -> R, -{ - write_aux(&[(CONSENSUS_CHANGES_KEY, set.encode().as_slice())]) -} - #[cfg(test)] pub(crate) fn load_authorities(backend: &B) -> Option> { diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 038d82a8cdc3b3d045d4935b501ba3b48c178a2f..29fe8bc7471a0b74ac91e41bd77ad0cef832617f 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -68,6 +68,8 @@ mod periodic; #[cfg(test)] pub(crate) mod tests; +/// Name of the notifications protocol used by Grandpa. Must be registered towards the networking +/// in order for Grandpa to properly function. pub const GRANDPA_PROTOCOL_NAME: &'static str = "/paritytech/grandpa/1"; // cost scalars for reporting peers. diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index e1685256f7b8dc9ac705f29fd15aeb6b3a384437..27a394a062bc8b99eca9f6af6022f681238991f7 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -62,8 +62,6 @@ impl sc_network_gossip::Network for TestNetwork { let _ = self.sender.unbounded_send(Event::WriteNotification(who, message)); } - fn register_notifications_protocol(&self, _: Cow<'static, str>) {} - fn announce(&self, block: Hash, _associated_data: Vec) { let _ = self.sender.unbounded_send(Event::Announce(block)); } diff --git a/client/finality-grandpa/src/consensus_changes.rs b/client/finality-grandpa/src/consensus_changes.rs deleted file mode 100644 index 1ce7b551d0d7c577524b876e508467a8fc12bd75..0000000000000000000000000000000000000000 --- a/client/finality-grandpa/src/consensus_changes.rs +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use std::sync::Arc; -use parity_scale_codec::{Encode, Decode}; - -/// Consensus-related data changes tracker. 
-#[derive(Clone, Debug, Encode, Decode)] -pub(crate) struct ConsensusChanges { - pending_changes: Vec<(N, H)>, -} - -impl ConsensusChanges { - /// Create empty consensus changes. - pub(crate) fn empty() -> Self { - ConsensusChanges { pending_changes: Vec::new(), } - } -} - -impl ConsensusChanges { - - /// Returns reference to all pending changes. - pub fn pending_changes(&self) -> &[(N, H)] { - &self.pending_changes - } - - /// Note unfinalized change of consensus-related data. - pub(crate) fn note_change(&mut self, at: (N, H)) { - let idx = self.pending_changes - .binary_search_by_key(&at.0, |change| change.0) - .unwrap_or_else(|i| i); - self.pending_changes.insert(idx, at); - } - - /// Finalize all pending consensus changes that are finalized by given block. - /// Returns true if there any changes were finalized. - pub(crate) fn finalize ::sp_blockchain::Result>>( - &mut self, - block: (N, H), - canonical_at_height: F, - ) -> ::sp_blockchain::Result<(bool, bool)> { - let (split_idx, has_finalized_changes) = self.pending_changes.iter() - .enumerate() - .take_while(|(_, &(at_height, _))| at_height <= block.0) - .fold((None, Ok(false)), |(_, has_finalized_changes), (idx, ref at)| - ( - Some(idx), - has_finalized_changes - .and_then(|has_finalized_changes| if has_finalized_changes { - Ok(has_finalized_changes) - } else { - canonical_at_height(at.0).map(|can_hash| Some(at.1) == can_hash) - }), - )); - - let altered_changes = split_idx.is_some(); - if let Some(split_idx) = split_idx { - self.pending_changes = self.pending_changes.split_off(split_idx + 1); - } - has_finalized_changes.map(|has_finalized_changes| (altered_changes, has_finalized_changes)) - } -} - -/// Thread-safe consensus changes tracker reference. -pub(crate) type SharedConsensusChanges = Arc>>; diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 9b3a656d0cd8fad741f82fe7e17ff43e653886c4..790be2a22178878dade0e4961193eade7703df41 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -34,10 +34,10 @@ use finality_grandpa::{ BlockNumberOps, Error as GrandpaError, round::State as RoundState, voter, voter_set::VoterSet, }; -use sp_blockchain::{HeaderBackend, HeaderMetadata, Error as ClientError}; +use sp_blockchain::HeaderMetadata; use sp_runtime::generic::BlockId; use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, One, Zero, + Block as BlockT, Header as HeaderT, NumberFor, Zero, }; use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_INFO}; @@ -50,7 +50,6 @@ use sp_consensus::SelectChain; use crate::authorities::{AuthoritySet, SharedAuthoritySet}; use crate::communication::Network as NetworkT; -use crate::consensus_changes::SharedConsensusChanges; use crate::notification::GrandpaJustificationSender; use crate::justification::GrandpaJustification; use crate::until_imported::UntilVoteTargetImported; @@ -440,7 +439,6 @@ pub(crate) struct Environment, SC, pub(crate) voters: Arc>, pub(crate) config: Config, pub(crate) authority_set: SharedAuthoritySet>, - pub(crate) consensus_changes: SharedConsensusChanges>, pub(crate) network: crate::communication::NetworkBridge, pub(crate) set_id: SetId, pub(crate) voter_set_state: SharedVoterSetState, @@ -1115,7 +1113,6 @@ where finalize_block( self.client.clone(), &self.authority_set, - &self.consensus_changes, Some(self.config.justification_period.into()), hash, number, @@ -1180,7 +1177,6 @@ impl From> for JustificationOrCommit< pub(crate) fn finalize_block( 
client: Arc, authority_set: &SharedAuthoritySet>, - consensus_changes: &SharedConsensusChanges>, justification_period: Option>, hash: Block::Hash, number: NumberFor, @@ -1215,15 +1211,6 @@ where // FIXME #1483: clone only when changed let old_authority_set = authority_set.clone(); - // holds the old consensus changes in case it is changed below, needed for - // reverting in case of failure - let mut old_consensus_changes = None; - - let mut consensus_changes = consensus_changes.lock(); - let canon_at_height = |canon_number| { - // "true" because the block is finalized - canonical_at_height(&*client, (hash, number), true, canon_number) - }; let update_res: Result<_, Error> = client.lock_import_and_run(|import_op| { let status = authority_set.apply_standard_changes( @@ -1233,26 +1220,6 @@ where initial_sync, ).map_err(|e| Error::Safety(e.to_string()))?; - // check if this is this is the first finalization of some consensus changes - let (alters_consensus_changes, finalizes_consensus_changes) = consensus_changes - .finalize((number, hash), &canon_at_height)?; - - if alters_consensus_changes { - old_consensus_changes = Some(consensus_changes.clone()); - - let write_result = crate::aux_schema::update_consensus_changes( - &*consensus_changes, - |insert| apply_aux(import_op, insert, &[]), - ); - - if let Err(e) = write_result { - warn!(target: "afg", "Failed to write updated consensus changes to disk. Bailing."); - warn!(target: "afg", "Node is in a potentially inconsistent state."); - - return Err(e.into()); - } - } - // send a justification notification if a sender exists and in case of error log it. fn notify_justification( justification_sender: Option<&GrandpaJustificationSender>, @@ -1280,9 +1247,7 @@ where let mut justification_required = // justification is always required when block that enacts new authorities // set is finalized - status.new_set_block.is_some() || - // justification is required when consensus changes are finalized - finalizes_consensus_changes; + status.new_set_block.is_some(); // justification is required every N blocks to be able to prove blocks // finalization to remote nodes @@ -1387,57 +1352,7 @@ where Err(e) => { *authority_set = old_authority_set; - if let Some(old_consensus_changes) = old_consensus_changes { - *consensus_changes = old_consensus_changes; - } - Err(CommandOrError::Error(e)) } } } - -/// Using the given base get the block at the given height on this chain. The -/// target block must be an ancestor of base, therefore `height <= base.height`. -pub(crate) fn canonical_at_height>( - provider: &C, - base: (Block::Hash, NumberFor), - base_is_canonical: bool, - height: NumberFor, -) -> Result, ClientError> { - if height > base.1 { - return Ok(None); - } - - if height == base.1 { - if base_is_canonical { - return Ok(Some(base.0)); - } else { - return Ok(provider.hash(height).unwrap_or(None)); - } - } else if base_is_canonical { - return Ok(provider.hash(height).unwrap_or(None)); - } - - let one = NumberFor::::one(); - - // start by getting _canonical_ block with number at parent position and then iterating - // backwards by hash. - let mut current = match provider.header(BlockId::Number(base.1 - one))? { - Some(header) => header, - _ => return Ok(None), - }; - - // we've already checked that base > height above. - let mut steps = base.1 - height - one; - - while steps > NumberFor::::zero() { - current = match provider.header(BlockId::Hash(*current.parent_hash()))? 
{ - Some(header) => header, - _ => return Ok(None), - }; - - steps -= one; - } - - Ok(Some(current.hash())) -} diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 33dd69cc11d6e73c328b2ceb4d0f594a40ba1fae..bf367ab3f4a55e4d6c5e7c06efec686abd46414a 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -16,6 +16,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +// NOTE: should be removed with: https://github.com/paritytech/substrate/pull/7339 +#![allow(dead_code)] + //! GRANDPA block finality proof generation and check. //! //! Finality of block B is proved by providing: @@ -37,7 +40,7 @@ //! of the U) could be returned. use std::sync::Arc; -use log::{trace, warn}; +use log::trace; use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result as ClientResult}; use sc_client_api::{ @@ -206,34 +209,6 @@ impl FinalityProofProvider } } -impl sc_network::config::FinalityProofProvider for FinalityProofProvider - where - Block: BlockT, - NumberFor: BlockNumberOps, - B: Backend + Send + Sync + 'static, -{ - fn prove_finality( - &self, - for_block: Block::Hash, - request: &[u8], - ) -> Result>, ClientError> { - let request: FinalityProofRequest = Decode::decode(&mut &request[..]) - .map_err(|e| { - warn!(target: "afg", "Unable to decode finality proof request: {}", e.what()); - ClientError::Backend("Invalid finality proof request".to_string()) - })?; - match request { - FinalityProofRequest::Original(request) => prove_finality::<_, _, GrandpaJustification>( - &*self.backend.blockchain(), - &*self.authority_provider, - request.authorities_set_id, - request.last_finalized, - for_block, - ), - } - } -} - /// The effects of block finality. #[derive(Debug, PartialEq)] pub struct FinalityEffects { @@ -290,14 +265,6 @@ struct OriginalFinalityProofRequest { pub last_finalized: H, } -/// Prepare data blob associated with finality proof request. -pub(crate) fn make_finality_proof_request(last_finalized: H, authorities_set_id: u64) -> Vec { - FinalityProofRequest::Original(OriginalFinalityProofRequest { - authorities_set_id, - last_finalized, - }).encode() -} - /// Prepare proof-of-finality for the best possible block in the range: (begin; end]. /// /// It is assumed that the caller already have a proof-of-finality for the block 'begin'. 
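[Illustrative note, not part of the patch] `finality_proof.rs` above is kept compiling after its network-facing consumers are removed by adding a crate-level `#![allow(dead_code)]` with a note pointing at the follow-up PR. The toy module below shows that pattern in miniature (all names are hypothetical); an item-level `#[allow(dead_code)]` is the narrower alternative once only a few items remain unused.

```rust
// Keep a temporarily-unused module compiling under `-D warnings` by allowing
// the lint for the whole module, with a note saying when the allow goes away.
mod finality_proof_stub {
    #![allow(dead_code)] // NOTE: temporary, to be removed with the follow-up PR.

    pub struct FinalityEffects {
        pub block: u64,
    }

    fn unused_helper() -> u32 {
        42
    }
}

fn main() {
    // Without the inner `#![allow(dead_code)]`, `unused_helper` would trip the
    // `dead_code` lint in a warnings-as-errors build.
    let effects = finality_proof_stub::FinalityEffects { block: 1 };
    println!("finalized block: {}", effects.block);
}
```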
diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 04df95a3187e1bea91bea1c945394faaf6bccef6..89f9d0c16ad7c6d0b52fb8d08a46b5795d7bf646 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -41,7 +41,6 @@ use sp_runtime::traits::{ use crate::{Error, CommandOrError, NewAuthoritySet, VoterCommand}; use crate::authorities::{AuthoritySet, SharedAuthoritySet, DelayKind, PendingChange}; -use crate::consensus_changes::SharedConsensusChanges; use crate::environment::finalize_block; use crate::justification::GrandpaJustification; use crate::notification::GrandpaJustificationSender; @@ -61,7 +60,6 @@ pub struct GrandpaBlockImport { select_chain: SC, authority_set: SharedAuthoritySet>, send_voter_commands: TracingUnboundedSender>>, - consensus_changes: SharedConsensusChanges>, authority_set_hard_forks: HashMap>>, justification_sender: GrandpaJustificationSender, _phantom: PhantomData, @@ -76,7 +74,6 @@ impl Clone for select_chain: self.select_chain.clone(), authority_set: self.authority_set.clone(), send_voter_commands: self.send_voter_commands.clone(), - consensus_changes: self.consensus_changes.clone(), authority_set_hard_forks: self.authority_set_hard_forks.clone(), justification_sender: self.justification_sender.clone(), _phantom: PhantomData, @@ -439,7 +436,6 @@ impl BlockImport // we don't want to finalize on `inner.import_block` let mut justification = block.justification.take(); - let enacts_consensus_change = !new_cache.is_empty(); let import_result = (&*self.inner).import_block(block, new_cache); let mut imported_aux = { @@ -517,7 +513,7 @@ impl BlockImport ); import_res.unwrap_or_else(|err| { - if needs_justification || enacts_consensus_change { + if needs_justification { debug!(target: "afg", "Imported block #{} that enacts authority set change with \ invalid justification: {:?}, requesting justification from peers.", number, err); imported_aux.bad_justification = true; @@ -535,12 +531,6 @@ impl BlockImport imported_aux.needs_justification = true; } - - // we have imported block with consensus data changes, but without justification - // => remember to create justification when next block will be finalized - if enacts_consensus_change { - self.consensus_changes.lock().note_change((number, hash)); - } } } @@ -561,7 +551,6 @@ impl GrandpaBlockImport>, send_voter_commands: TracingUnboundedSender>>, - consensus_changes: SharedConsensusChanges>, authority_set_hard_forks: Vec<(SetId, PendingChange>)>, justification_sender: GrandpaJustificationSender, ) -> GrandpaBlockImport { @@ -605,7 +594,6 @@ impl GrandpaBlockImport BlockStatus for Arc where /// A trait that includes all the client functionalities grandpa requires. /// Ideally this would be a trait alias, we're not there yet. -/// tracking issue https://github.com/rust-lang/rust/issues/41517 +/// tracking issue pub trait ClientForGrandpa: LockImportRun + Finalizer + AuxStore + HeaderMetadata + HeaderBackend @@ -588,7 +586,6 @@ where select_chain.clone(), persistent_data.authority_set.clone(), voter_commands_tx, - persistent_data.consensus_changes.clone(), authority_set_hard_forks, justification_sender.clone(), ), @@ -656,6 +653,10 @@ pub struct GrandpaParams { /// A link to the block import worker. pub link: LinkHalf, /// The Network instance. + /// + /// It is assumed that this network will feed us Grandpa notifications. 
When using the + /// `sc_network` crate, it is assumed that the Grandpa notifications protocol has been passed + /// to the configuration of the networking. pub network: N, /// If supplied, can be used to hook on telemetry connection established events. pub telemetry_on_connect: Option>, @@ -844,7 +845,6 @@ where network: network.clone(), set_id: persistent_data.authority_set.set_id(), authority_set: persistent_data.authority_set.clone(), - consensus_changes: persistent_data.consensus_changes.clone(), voter_set_state: persistent_data.set_state, metrics: metrics.as_ref().map(|m| m.environment.clone()), justification_sender: Some(justification_sender), @@ -989,7 +989,6 @@ where select_chain: self.env.select_chain.clone(), config: self.env.config.clone(), authority_set: self.env.authority_set.clone(), - consensus_changes: self.env.consensus_changes.clone(), network: self.env.network.clone(), voting_rule: self.env.voting_rule.clone(), metrics: self.env.metrics.clone(), @@ -1071,26 +1070,6 @@ where } } -/// When GRANDPA is not initialized we still need to register the finality -/// tracker inherent provider which might be expected by the runtime for block -/// authoring. Additionally, we register a gossip message validator that -/// discards all GRANDPA messages (otherwise, we end up banning nodes that send -/// us a `Neighbor` message, since there is no registered gossip validator for -/// the engine id defined in the message.) -pub fn setup_disabled_grandpa(network: N) -> Result<(), sp_consensus::Error> -where - N: NetworkT + Send + Clone + 'static, -{ - // We register the GRANDPA protocol so that we don't consider it an anomaly - // to receive GRANDPA messages on the network. We don't process the - // messages. - network.register_notifications_protocol( - From::from(communication::GRANDPA_PROTOCOL_NAME), - ); - - Ok(()) -} - /// Checks if this node has any available keys in the keystore for any authority id in the given /// voter set. Returns the authority id for which keys are available, or `None` if no keys are /// available. diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs deleted file mode 100644 index a7c9a655467c799ed500e6186fd64cc9825b1a7e..0000000000000000000000000000000000000000 --- a/client/finality-grandpa/src/light_import.rs +++ /dev/null @@ -1,880 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . 
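[Illustrative note, not part of the patch] With `setup_disabled_grandpa` and the gossip `register_notifications_protocol` hook gone (see the hunks above), registering the Grandpa notifications protocol becomes the node's job, as the new doc comment on the `network` field states. The sketch below only illustrates that division of responsibility: the protocol name is the constant from this diff, but `NetworkConfig` and its `notification_protocols` field are toy stand-ins, not the actual `sc_network` configuration API.

```rust
// Protocol name as defined by the Grandpa crate in this diff.
const GRANDPA_PROTOCOL_NAME: &str = "/paritytech/grandpa/1";

// Toy stand-in for a networking configuration; the real field name and type in
// `sc_network` differ and are not shown here.
#[derive(Default, Debug)]
struct NetworkConfig {
    notification_protocols: Vec<String>,
}

fn main() {
    let mut config = NetworkConfig::default();
    // The node-building code is now responsible for this step; Grandpa simply
    // assumes the protocol is already known to the network layer.
    config.notification_protocols.push(GRANDPA_PROTOCOL_NAME.to_string());

    assert!(config
        .notification_protocols
        .iter()
        .any(|p| p == GRANDPA_PROTOCOL_NAME));
    println!("{:?}", config);
}
```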
- -use std::collections::HashMap; -use std::sync::Arc; -use log::{info, trace, warn}; -use parking_lot::RwLock; -use sc_client_api::backend::{AuxStore, Backend, Finalizer, TransactionFor}; -use sp_blockchain::{HeaderBackend, Error as ClientError, well_known_cache_keys}; -use parity_scale_codec::{Encode, Decode}; -use sp_consensus::{ - import_queue::Verifier, - BlockOrigin, BlockImport, FinalityProofImport, BlockImportParams, ImportResult, ImportedAux, - BlockCheckParams, Error as ConsensusError, -}; -use sc_network::config::{BoxFinalityProofRequestBuilder, FinalityProofRequestBuilder}; -use sp_runtime::Justification; -use sp_runtime::traits::{NumberFor, Block as BlockT, Header as HeaderT, DigestFor}; -use sp_finality_grandpa::{self, AuthorityList}; -use sp_runtime::generic::BlockId; - -use crate::GenesisAuthoritySetProvider; -use crate::aux_schema::load_decode; -use crate::consensus_changes::ConsensusChanges; -use crate::environment::canonical_at_height; -use crate::finality_proof::{ - AuthoritySetForFinalityChecker, ProvableJustification, make_finality_proof_request, -}; -use crate::justification::GrandpaJustification; - -/// LightAuthoritySet is saved under this key in aux storage. -const LIGHT_AUTHORITY_SET_KEY: &[u8] = b"grandpa_voters"; -/// ConsensusChanges is saver under this key in aux storage. -const LIGHT_CONSENSUS_CHANGES_KEY: &[u8] = b"grandpa_consensus_changes"; - -/// Create light block importer. -pub fn light_block_import( - client: Arc, - backend: Arc, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, - authority_set_provider: Arc>, -) -> Result, ClientError> - where - BE: Backend, - Client: crate::ClientForGrandpa, -{ - let info = client.info(); - let import_data = load_aux_import_data( - info.finalized_hash, - &*client, - genesis_authorities_provider, - )?; - Ok(GrandpaLightBlockImport { - client, - backend, - authority_set_provider, - data: Arc::new(RwLock::new(import_data)), - }) -} - -/// A light block-import handler for GRANDPA. -/// -/// It is responsible for: -/// - checking GRANDPA justifications; -/// - fetching finality proofs for blocks that are enacting consensus changes. -pub struct GrandpaLightBlockImport { - client: Arc, - backend: Arc, - authority_set_provider: Arc>, - data: Arc>>, -} - -impl Clone for GrandpaLightBlockImport { - fn clone(&self) -> Self { - GrandpaLightBlockImport { - client: self.client.clone(), - backend: self.backend.clone(), - authority_set_provider: self.authority_set_provider.clone(), - data: self.data.clone(), - } - } -} - -/// Mutable data of light block importer. -struct LightImportData { - last_finalized: Block::Hash, - authority_set: LightAuthoritySet, - consensus_changes: ConsensusChanges>, -} - -/// Latest authority set tracker. -#[derive(Debug, Encode, Decode)] -struct LightAuthoritySet { - set_id: u64, - authorities: AuthorityList, -} - -impl GrandpaLightBlockImport { - /// Create finality proof request builder. 
- pub fn create_finality_proof_request_builder(&self) -> BoxFinalityProofRequestBuilder { - Box::new(GrandpaFinalityProofRequestBuilder(self.data.clone())) as _ - } -} - -impl BlockImport - for GrandpaLightBlockImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - for<'a> &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, -{ - type Error = ConsensusError; - type Transaction = TransactionFor; - - fn import_block( - &mut self, - block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - do_import_block::<_, _, _, GrandpaJustification>( - &*self.client, &mut *self.data.write(), block, new_cache - ) - } - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.client.check_block(block) - } -} - -impl FinalityProofImport - for GrandpaLightBlockImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - for<'a> &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, -{ - type Error = ConsensusError; - - fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { - let mut out = Vec::new(); - let chain_info = (&*self.client).info(); - - let data = self.data.read(); - for (pending_number, pending_hash) in data.consensus_changes.pending_changes() { - if *pending_number > chain_info.finalized_number - && *pending_number <= chain_info.best_number - { - out.push((*pending_hash, *pending_number)); - } - } - - out - } - - fn import_finality_proof( - &mut self, - hash: Block::Hash, - number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, - ) -> Result<(Block::Hash, NumberFor), Self::Error> { - do_import_finality_proof::<_, _, _, GrandpaJustification>( - &*self.client, - self.backend.clone(), - &*self.authority_set_provider, - &mut *self.data.write(), - hash, - number, - finality_proof, - verifier, - ) - } -} - -impl LightAuthoritySet { - /// Get a genesis set with given authorities. - pub fn genesis(initial: AuthorityList) -> Self { - LightAuthoritySet { - set_id: sp_finality_grandpa::SetId::default(), - authorities: initial, - } - } - - /// Get latest set id. - pub fn set_id(&self) -> u64 { - self.set_id - } - - /// Get latest authorities set. - pub fn authorities(&self) -> AuthorityList { - self.authorities.clone() - } - - /// Set new authorities set. - pub fn update(&mut self, set_id: u64, authorities: AuthorityList) { - self.set_id = set_id; - self.authorities = authorities; - } -} - -struct GrandpaFinalityProofRequestBuilder(Arc>>); - -impl FinalityProofRequestBuilder for GrandpaFinalityProofRequestBuilder { - fn build_request_data(&mut self, _hash: &B::Hash) -> Vec { - let data = self.0.read(); - make_finality_proof_request( - data.last_finalized, - data.authority_set.set_id(), - ) - } -} - -/// Try to import new block. 
-fn do_import_block( - mut client: C, - data: &mut LightImportData, - mut block: BlockImportParams>, - new_cache: HashMap>, -) -> Result - where - C: HeaderBackend - + AuxStore - + Finalizer - + BlockImport> - + Clone, - B: Backend + 'static, - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - J: ProvableJustification, -{ - let hash = block.post_hash(); - let number = *block.header.number(); - - // we don't want to finalize on `inner.import_block` - let justification = block.justification.take(); - let enacts_consensus_change = !new_cache.is_empty(); - let import_result = client.import_block(block, new_cache); - - let mut imported_aux = match import_result { - Ok(ImportResult::Imported(aux)) => aux, - Ok(r) => return Ok(r), - Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), - }; - - match justification { - Some(justification) => { - trace!( - target: "afg", - "Imported block {}{}. Importing justification.", - if enacts_consensus_change { " which enacts consensus changes" } else { "" }, - hash, - ); - - do_import_justification::<_, _, _, J>(client, data, hash, number, justification) - }, - None if enacts_consensus_change => { - trace!( - target: "afg", - "Imported block {} which enacts consensus changes. Requesting finality proof.", - hash, - ); - - // remember that we need finality proof for this block - imported_aux.needs_finality_proof = true; - data.consensus_changes.note_change((number, hash)); - Ok(ImportResult::Imported(imported_aux)) - }, - None => Ok(ImportResult::Imported(imported_aux)), - } -} - -/// Try to import finality proof. -fn do_import_finality_proof( - client: C, - backend: Arc, - authority_set_provider: &dyn AuthoritySetForFinalityChecker, - data: &mut LightImportData, - _hash: Block::Hash, - _number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, -) -> Result<(Block::Hash, NumberFor), ConsensusError> - where - C: HeaderBackend - + AuxStore - + Finalizer - + BlockImport> - + Clone, - B: Backend + 'static, - DigestFor: Encode, - NumberFor: finality_grandpa::BlockNumberOps, - J: ProvableJustification, -{ - let authority_set_id = data.authority_set.set_id(); - let authorities = data.authority_set.authorities(); - let finality_effects = crate::finality_proof::check_finality_proof::<_, _, J>( - backend.blockchain(), - authority_set_id, - authorities, - authority_set_provider, - finality_proof, - ).map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - - // try to import all new headers - let block_origin = BlockOrigin::NetworkBroadcast; - for header_to_import in finality_effects.headers_to_import { - let (block_to_import, new_authorities) = verifier.verify( - block_origin, - header_to_import, - None, - None, - ).map_err(|e| ConsensusError::ClientImport(e))?; - assert!( - block_to_import.justification.is_none(), - "We have passed None as justification to verifier.verify", - ); - - let mut cache = HashMap::new(); - if let Some(authorities) = new_authorities { - cache.insert(well_known_cache_keys::AUTHORITIES, authorities.encode()); - } - do_import_block::<_, _, _, J>( - client.clone(), - data, - block_to_import.convert_transaction(), - cache, - )?; - } - - // try to import latest justification - let finalized_block_hash = finality_effects.block; - let finalized_block_number = backend.blockchain() - .expect_block_number_from_id(&BlockId::Hash(finality_effects.block)) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - do_finalize_block( - client.clone(), - data, - finalized_block_hash, - 
finalized_block_number, - finality_effects.justification.encode(), - )?; - - // apply new authorities set - data.authority_set.update( - finality_effects.new_set_id, - finality_effects.new_authorities, - ); - - // store new authorities set - require_insert_aux( - &client, - LIGHT_AUTHORITY_SET_KEY, - &data.authority_set, - "authority set", - )?; - - Ok((finalized_block_hash, finalized_block_number)) -} - -/// Try to import justification. -fn do_import_justification( - client: C, - data: &mut LightImportData, - hash: Block::Hash, - number: NumberFor, - justification: Justification, -) -> Result - where - C: HeaderBackend - + AuxStore - + Finalizer - + Clone, - B: Backend + 'static, - NumberFor: finality_grandpa::BlockNumberOps, - J: ProvableJustification, -{ - // with justification, we have two cases - // - // optimistic: the same GRANDPA authorities set has generated intermediate justification - // => justification is verified using current authorities set + we could proceed further - // - // pessimistic scenario: the GRANDPA authorities set has changed - // => we need to fetch new authorities set (i.e. finality proof) from remote node - - // first, try to behave optimistically - let authority_set_id = data.authority_set.set_id(); - let justification = J::decode_and_verify( - &justification, - authority_set_id, - &data.authority_set.authorities(), - ); - - // BadJustification error means that justification has been successfully decoded, but - // it isn't valid within current authority set - let justification = match justification { - Err(ClientError::BadJustification(_)) => { - trace!( - target: "afg", - "Justification for {} is not valid within current authorities set. Requesting finality proof.", - hash, - ); - - let mut imported_aux = ImportedAux::default(); - imported_aux.needs_finality_proof = true; - return Ok(ImportResult::Imported(imported_aux)); - }, - Err(e) => { - trace!( - target: "afg", - "Justification for {} is not valid. Bailing.", - hash, - ); - - return Err(ConsensusError::ClientImport(e.to_string())); - }, - Ok(justification) => { - trace!( - target: "afg", - "Justification for {} is valid. Finalizing the block.", - hash, - ); - - justification - }, - }; - - // finalize the block - do_finalize_block(client, data, hash, number, justification.encode()) -} - -/// Finalize the block. 
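For orientation only, and not part of the diff itself: a minimal standalone sketch of the optimistic-then-fallback flow that do_import_justification (above) implements. VerifyError, Outcome and import_justification are invented stand-ins, not the real Substrate types.

    // Optimistically verify a justification against the authority set we currently track;
    // if it fails only because the set has changed, fall back to requesting a finality proof.
    #[derive(Debug)]
    enum VerifyError {
        BadJustification,   // decoded fine, but signed by a different authority set
        Other(String),      // undecodable or otherwise malformed
    }

    enum Outcome {
        Finalized,           // justification matched the current set
        NeedsFinalityProof,  // wrong set: ask a full node for a finality proof instead
    }

    fn import_justification(
        verify_with_current_set: impl Fn() -> Result<(), VerifyError>,
        finalize: impl FnOnce(),
    ) -> Result<Outcome, String> {
        match verify_with_current_set() {
            Ok(()) => {
                finalize();
                Ok(Outcome::Finalized)
            }
            Err(VerifyError::BadJustification) => Ok(Outcome::NeedsFinalityProof),
            Err(VerifyError::Other(e)) => Err(e),
        }
    }

    fn main() {
        // A justification signed by an unknown set leads to a finality-proof request.
        let outcome = import_justification(|| Err(VerifyError::BadJustification), || ());
        assert!(matches!(outcome, Ok(Outcome::NeedsFinalityProof)));
    }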
-fn do_finalize_block( - client: C, - data: &mut LightImportData, - hash: Block::Hash, - number: NumberFor, - justification: Justification, -) -> Result - where - C: HeaderBackend - + AuxStore - + Finalizer - + Clone, - B: Backend + 'static, - NumberFor: finality_grandpa::BlockNumberOps, -{ - // finalize the block - client.finalize_block(BlockId::Hash(hash), Some(justification), true).map_err(|e| { - warn!(target: "afg", "Error applying finality to block {:?}: {:?}", (hash, number), e); - ConsensusError::ClientImport(e.to_string()) - })?; - - // forget obsoleted consensus changes - let consensus_finalization_res = data.consensus_changes - .finalize( - (number, hash), - |at_height| canonical_at_height(&client, (hash, number), true, at_height) - ); - match consensus_finalization_res { - Ok((true, _)) => require_insert_aux( - &client, - LIGHT_CONSENSUS_CHANGES_KEY, - &data.consensus_changes, - "consensus changes", - )?, - Ok(_) => (), - Err(error) => return Err(on_post_finalization_error(error, "consensus changes")), - } - - // update last finalized block reference - data.last_finalized = hash; - - // we just finalized this block, so if we were importing it, it is now the new best - Ok(ImportResult::imported(true)) -} - -/// Load light import aux data from the store. -fn load_aux_import_data( - last_finalized: Block::Hash, - aux_store: &B, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, -) -> Result, ClientError> - where - B: AuxStore, - Block: BlockT, -{ - let authority_set = match load_decode(aux_store, LIGHT_AUTHORITY_SET_KEY)? { - Some(authority_set) => authority_set, - None => { - info!(target: "afg", "Loading GRANDPA authorities \ - from genesis on what appears to be first startup."); - - // no authority set on disk: fetch authorities from genesis state - let genesis_authorities = genesis_authorities_provider.get()?; - - let authority_set = LightAuthoritySet::genesis(genesis_authorities); - let encoded = authority_set.encode(); - aux_store.insert_aux(&[(LIGHT_AUTHORITY_SET_KEY, &encoded[..])], &[])?; - - authority_set - }, - }; - - let consensus_changes = match load_decode(aux_store, LIGHT_CONSENSUS_CHANGES_KEY)? { - Some(consensus_changes) => consensus_changes, - None => { - let consensus_changes = ConsensusChanges::>::empty(); - - let encoded = authority_set.encode(); - aux_store.insert_aux(&[(LIGHT_CONSENSUS_CHANGES_KEY, &encoded[..])], &[])?; - - consensus_changes - }, - }; - - Ok(LightImportData { - last_finalized, - authority_set, - consensus_changes, - }) -} - -/// Insert into aux store. If failed, return error && show inconsistency warning. -fn require_insert_aux( - store: &A, - key: &[u8], - value: &T, - value_type: &str, -) -> Result<(), ConsensusError> { - let encoded = value.encode(); - let update_res = store.insert_aux(&[(key, &encoded[..])], &[]); - if let Err(error) = update_res { - return Err(on_post_finalization_error(error, value_type)); - } - - Ok(()) -} - -/// Display inconsistency warning. -fn on_post_finalization_error(error: ClientError, value_type: &str) -> ConsensusError { - warn!(target: "afg", "Failed to write updated {} to disk. 
Bailing.", value_type); - warn!(target: "afg", "Node is in a potentially inconsistent state."); - ConsensusError::ClientImport(error.to_string()) -} - -#[cfg(test)] -pub mod tests { - use super::*; - use sp_consensus::{import_queue::CacheKeyId, ForkChoiceStrategy, BlockImport}; - use sp_finality_grandpa::AuthorityId; - use sp_core::{H256, crypto::Public}; - use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore, StorageProof, BlockBackend}; - use substrate_test_runtime_client::runtime::{Block, Header}; - use crate::tests::TestApi; - use crate::finality_proof::{ - FinalityProofFragment, - tests::{TestJustification, ClosureAuthoritySetForFinalityChecker}, - }; - - struct OkVerifier; - - impl Verifier for OkVerifier { - fn verify( - &mut self, - origin: BlockOrigin, - header: Header, - _justification: Option, - _body: Option::Extrinsic>>, - ) -> Result<(BlockImportParams, Option)>>), String> { - Ok((BlockImportParams::new(origin, header), None)) - } - } - - pub struct NoJustificationsImport( - pub GrandpaLightBlockImport - ); - - impl Clone - for NoJustificationsImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - { - fn clone(&self) -> Self { - NoJustificationsImport(self.0.clone()) - } - } - - impl BlockImport - for NoJustificationsImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - for <'a > &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, - GrandpaLightBlockImport: - BlockImport, Error = ConsensusError> - { - type Error = ConsensusError; - type Transaction = TransactionFor; - - fn import_block( - &mut self, - mut block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - block.justification.take(); - self.0.import_block(block, new_cache) - } - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.0.check_block(block) - } - } - - impl FinalityProofImport - for NoJustificationsImport where - NumberFor: finality_grandpa::BlockNumberOps, - BE: Backend + 'static, - DigestFor: Encode, - for <'a > &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, - { - type Error = ConsensusError; - - fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { - self.0.on_start() - } - - fn import_finality_proof( - &mut self, - hash: Block::Hash, - number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, - ) -> Result<(Block::Hash, NumberFor), Self::Error> { - self.0.import_finality_proof(hash, number, finality_proof, verifier) - } - } - - /// Creates light block import that ignores justifications that came outside of finality proofs. 
- pub fn light_block_import_without_justifications( - client: Arc, - backend: Arc, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, - authority_set_provider: Arc>, - ) -> Result, ClientError> - where - BE: Backend + 'static, - Client: crate::ClientForGrandpa, - { - light_block_import(client, backend, genesis_authorities_provider, authority_set_provider) - .map(NoJustificationsImport) - } - - fn import_block( - new_cache: HashMap>, - justification: Option, - ) -> ( - ImportResult, - substrate_test_runtime_client::client::Client, - Arc, - ) { - let (client, backend) = substrate_test_runtime_client::new_light(); - let mut import_data = LightImportData { - last_finalized: Default::default(), - authority_set: LightAuthoritySet::genesis(vec![(AuthorityId::from_slice(&[1; 32]), 1)]), - consensus_changes: ConsensusChanges::empty(), - }; - let mut block = BlockImportParams::new( - BlockOrigin::Own, - Header { - number: 1, - parent_hash: client.chain_info().best_hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }, - ); - block.justification = justification; - block.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - ( - do_import_block::<_, _, _, TestJustification>( - &client, - &mut import_data, - block, - new_cache, - ).unwrap(), - client, - backend, - ) - } - - #[test] - fn finality_proof_not_required_when_consensus_data_does_not_changes_and_no_justification_provided() { - assert_eq!(import_block(HashMap::new(), None).0, ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: false, - is_new_best: true, - header_only: false, - })); - } - - #[test] - fn finality_proof_not_required_when_consensus_data_does_not_changes_and_correct_justification_provided() { - let justification = TestJustification((0, vec![(AuthorityId::from_slice(&[1; 32]), 1)]), Vec::new()).encode(); - assert_eq!(import_block(HashMap::new(), Some(justification)).0, ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: false, - is_new_best: true, - header_only: false, - })); - } - - #[test] - fn finality_proof_required_when_consensus_data_changes_and_no_justification_provided() { - let mut cache = HashMap::new(); - cache.insert(well_known_cache_keys::AUTHORITIES, vec![AuthorityId::from_slice(&[2; 32])].encode()); - assert_eq!(import_block(cache, None).0, ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: true, - is_new_best: true, - header_only: false, - })); - } - - #[test] - fn finality_proof_required_when_consensus_data_changes_and_incorrect_justification_provided() { - let justification = TestJustification((0, vec![]), Vec::new()).encode(); - let mut cache = HashMap::new(); - cache.insert(well_known_cache_keys::AUTHORITIES, vec![AuthorityId::from_slice(&[2; 32])].encode()); - assert_eq!( - import_block(cache, Some(justification)).0, - ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: true, - is_new_best: false, - header_only: false, - }, - )); - } - - - #[test] - fn aux_data_updated_on_start() { - let aux_store = InMemoryAuxStore::::new(); - let api = TestApi::new(vec![(AuthorityId::from_slice(&[1; 32]), 1)]); - - // when aux store 
is empty initially - assert!(aux_store.get_aux(LIGHT_AUTHORITY_SET_KEY).unwrap().is_none()); - assert!(aux_store.get_aux(LIGHT_CONSENSUS_CHANGES_KEY).unwrap().is_none()); - - // it is updated on importer start - load_aux_import_data(Default::default(), &aux_store, &api).unwrap(); - assert!(aux_store.get_aux(LIGHT_AUTHORITY_SET_KEY).unwrap().is_some()); - assert!(aux_store.get_aux(LIGHT_CONSENSUS_CHANGES_KEY).unwrap().is_some()); - } - - #[test] - fn aux_data_loaded_on_restart() { - let aux_store = InMemoryAuxStore::::new(); - let api = TestApi::new(vec![(AuthorityId::from_slice(&[1; 32]), 1)]); - - // when aux store is non-empty initially - let mut consensus_changes = ConsensusChanges::::empty(); - consensus_changes.note_change((42, Default::default())); - aux_store.insert_aux( - &[ - ( - LIGHT_AUTHORITY_SET_KEY, - LightAuthoritySet::genesis( - vec![(AuthorityId::from_slice(&[42; 32]), 2)] - ).encode().as_slice(), - ), - ( - LIGHT_CONSENSUS_CHANGES_KEY, - consensus_changes.encode().as_slice(), - ), - ], - &[], - ).unwrap(); - - // importer uses it on start - let data = load_aux_import_data(Default::default(), &aux_store, &api).unwrap(); - assert_eq!(data.authority_set.authorities(), vec![(AuthorityId::from_slice(&[42; 32]), 2)]); - assert_eq!(data.consensus_changes.pending_changes(), &[(42, Default::default())]); - } - - #[test] - fn authority_set_is_updated_on_finality_proof_import() { - let initial_set_id = 0; - let initial_set = vec![(AuthorityId::from_slice(&[1; 32]), 1)]; - let updated_set = vec![(AuthorityId::from_slice(&[2; 32]), 2)]; - let babe_set_signal = vec![AuthorityId::from_slice(&[42; 32])].encode(); - - // import block #1 without justification - let mut cache = HashMap::new(); - cache.insert(well_known_cache_keys::AUTHORITIES, babe_set_signal); - let (_, client, backend) = import_block(cache, None); - - // import finality proof for block #1 - let hash = client.block_hash(1).unwrap().unwrap(); - let mut verifier = OkVerifier; - let mut import_data = LightImportData { - last_finalized: Default::default(), - authority_set: LightAuthoritySet::genesis(initial_set.clone()), - consensus_changes: ConsensusChanges::empty(), - }; - - // import finality proof - do_import_finality_proof::<_, _, _, TestJustification>( - &client, - backend, - &ClosureAuthoritySetForFinalityChecker( - |_, _, _| Ok(updated_set.clone()) - ), - &mut import_data, - Default::default(), - Default::default(), - vec![ - FinalityProofFragment::
{ - block: hash, - justification: TestJustification( - (initial_set_id, initial_set.clone()), - Vec::new(), - ).encode(), - unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::new(vec![])), - }, - ].encode(), - &mut verifier, - ).unwrap(); - - // verify that new authorities set has been saved to the aux storage - let data = load_aux_import_data(Default::default(), &client, &TestApi::new(initial_set)).unwrap(); - assert_eq!(data.authority_set.authorities(), updated_set); - } -} diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index fd00b35c40a739fab6704d4e09fdac62b9eb276e..c61998225e32300f542f256f70acdc0aa20102a7 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -39,7 +39,6 @@ use crate::{ }; use crate::authorities::SharedAuthoritySet; use crate::communication::{Network as NetworkT, NetworkBridge}; -use crate::consensus_changes::SharedConsensusChanges; use crate::notification::GrandpaJustificationSender; use sp_finality_grandpa::AuthorityId; use std::marker::{PhantomData, Unpin}; @@ -68,7 +67,6 @@ impl<'a, Block, Client> finality_grandpa::Chain> fn grandpa_observer( client: &Arc, authority_set: &SharedAuthoritySet>, - consensus_changes: &SharedConsensusChanges>, voters: &Arc>, justification_sender: &Option>, last_finalized_number: NumberFor, @@ -83,7 +81,6 @@ where Client: crate::ClientForGrandpa, { let authority_set = authority_set.clone(); - let consensus_changes = consensus_changes.clone(); let client = client.clone(); let voters = voters.clone(); let justification_sender = justification_sender.clone(); @@ -123,7 +120,6 @@ where match environment::finalize_block( client.clone(), &authority_set, - &consensus_changes, None, finalized_hash, finalized_number, @@ -293,7 +289,6 @@ where let observer = grandpa_observer( &self.client, &self.persistent_data.authority_set, - &self.persistent_data.consensus_changes, &voters, &self.justification_sender, last_finalized_number, diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 44503d3c85d4404aeb432beaca16cd8334bd945a..452b30941de5cb5f5ab9a4d86106df2af76dbc22 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -25,7 +25,7 @@ use sc_network_test::{ Block, BlockImportAdapter, Hash, PassThroughVerifier, Peer, PeersClient, PeersFullClient, TestClient, TestNetFactory, FullPeerConfig, }; -use sc_network::config::{ProtocolConfig, BoxFinalityProofRequestBuilder}; +use sc_network::config::ProtocolConfig; use parking_lot::{RwLock, Mutex}; use futures_timer::Delay; use tokio::runtime::{Runtime, Handle}; @@ -36,22 +36,21 @@ use sp_api::{ApiRef, StorageProof, ProvideRuntimeApi}; use substrate_test_runtime_client::runtime::BlockNumber; use sp_consensus::{ BlockOrigin, ForkChoiceStrategy, ImportedAux, BlockImportParams, ImportResult, BlockImport, - import_queue::{BoxJustificationImport, BoxFinalityProofImport}, + import_queue::BoxJustificationImport, }; use std::{collections::{HashMap, HashSet}, pin::Pin}; use parity_scale_codec::Decode; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, HashFor}; use sp_runtime::generic::{BlockId, DigestItem}; -use sp_core::{H256, crypto::Public}; +use sp_core::H256; use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; use sp_finality_grandpa::{GRANDPA_ENGINE_ID, AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof}; use sp_state_machine::{InMemoryBackend, prove_read, read_proof_check}; use 
authorities::AuthoritySet; use finality_proof::{ - FinalityProofProvider, AuthoritySetForFinalityProver, AuthoritySetForFinalityChecker, + AuthoritySetForFinalityProver, AuthoritySetForFinalityChecker, }; -use consensus_changes::ConsensusChanges; use sc_block_builder::BlockBuilderProvider; use sc_consensus::LongestChain; use sc_keystore::LocalKeystore; @@ -117,8 +116,6 @@ impl TestNetFactory for GrandpaTestNet { -> ( BlockImportAdapter, Option>, - Option>, - Option>, PeerData, ) { @@ -133,45 +130,12 @@ impl TestNetFactory for GrandpaTestNet { ( BlockImportAdapter::new_full(import), Some(justification_import), - None, - None, Mutex::new(Some(link)), ) }, - PeersClient::Light(ref client, ref backend) => { - use crate::light_import::tests::light_block_import_without_justifications; - - let authorities_provider = Arc::new(self.test_config.clone()); - // forbid direct finalization using justification that came with the block - // => light clients will try to fetch finality proofs - let import = light_block_import_without_justifications( - client.clone(), - backend.clone(), - &self.test_config, - authorities_provider, - ).expect("Could not create block import for fresh peer."); - let finality_proof_req_builder = import.0.create_finality_proof_request_builder(); - let proof_import = Box::new(import.clone()); - ( - BlockImportAdapter::new_light(import), - None, - Some(proof_import), - Some(finality_proof_req_builder), - Mutex::new(None), - ) - }, - } - } - - fn make_finality_proof_provider( - &self, - client: PeersClient - ) -> Option>> { - match client { - PeersClient::Full(_, ref backend) => { - Some(Arc::new(FinalityProofProvider::new(backend.clone(), self.test_config.clone()))) + PeersClient::Light(..) => { + panic!("Light client is not used in tests."); }, - PeersClient::Light(_, _) => None, } } @@ -305,6 +269,52 @@ fn block_until_complete(future: impl Future + Unpin, net: &Arc impl Future { + let voters = stream::FuturesUnordered::new(); + + for (peer_id, key) in peers.iter().enumerate() { + let (keystore, _) = create_keystore(*key); + + let (net_service, link) = { + // temporary needed for some reason + let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + ( + net.peers[peer_id].network_service().clone(), + link, + ) + }; + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", peer_id)), + is_authority: true, + observer_enabled: true, + }, + link, + network: net_service, + telemetry_on_connect: None, + voting_rule: (), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + }; + let voter = run_grandpa_voter(grandpa_params).expect("all in order with client and network"); + + fn assert_send(_: &T) { } + assert_send(&voter); + + voters.push(voter); + } + + voters.for_each(|_| async move {}) +} + // run the voters to completion. provide a closure to be invoked after // the voters are spawned but before blocking on them. 
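A standalone sketch, not part of the diff: the initialize_grandpa helper added above collects one voter future per peer into a FuturesUnordered and returns a single future that drives them all. The snippet below shows just that pattern with placeholder tasks instead of run_grandpa_voter, assuming the futures 0.3 crate.

    use futures::{executor::block_on, stream, StreamExt};

    fn run_all(n_voters: usize) -> impl std::future::Future<Output = ()> {
        let voters = stream::FuturesUnordered::new();

        for id in 0..n_voters {
            // In the real helper each entry is a `run_grandpa_voter(..)` future;
            // here it is a trivial placeholder task.
            voters.push(async move {
                println!("voter {} done", id);
            });
        }

        // Resolves once every pushed future has completed.
        voters.for_each(|_| async {})
    }

    fn main() {
        block_on(run_all(3));
    }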
fn run_to_completion_with( @@ -324,22 +334,9 @@ fn run_to_completion_with( wait_for.push(f); }; - let mut keystore_paths = Vec::new(); - for (peer_id, key) in peers.iter().enumerate() { - let (keystore, keystore_path) = create_keystore(*key); - keystore_paths.push(keystore_path); - + for (peer_id, _) in peers.iter().enumerate() { let highest_finalized = highest_finalized.clone(); - let (client, net_service, link) = { - let net = net.lock(); - // temporary needed for some reason - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - net.peers[peer_id].network_service().clone(), - link, - ) - }; + let client = net.lock().peers[peer_id].client().clone(); wait_for.push( Box::pin( @@ -355,30 +352,6 @@ fn run_to_completion_with( .map(|_| ()) ) ); - - fn assert_send(_: &T) { } - - let grandpa_params = GrandpaParams { - config: Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: Some(keystore), - name: Some(format!("peer#{}", peer_id)), - is_authority: true, - observer_enabled: true, - }, - link: link, - network: net_service, - telemetry_on_connect: None, - voting_rule: (), - prometheus_registry: None, - shared_voter_state: SharedVoterState::empty(), - }; - let voter = run_grandpa_voter(grandpa_params).expect("all in order with client and network"); - - assert_send(&voter); - - runtime.spawn(voter); } // wait for all finalized on each. @@ -424,6 +397,7 @@ fn finalize_3_voters_no_observers() { let voters = make_ids(peers); let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + runtime.spawn(initialize_grandpa(&mut net, peers)); net.peer(0).push_blocks(20, false); net.block_until_sync(); @@ -450,50 +424,18 @@ fn finalize_3_voters_1_full_observer() { let voters = make_ids(peers); let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); - net.peer(0).push_blocks(20, false); - net.block_until_sync(); - - let net = Arc::new(Mutex::new(net)); - let mut finality_notifications = Vec::new(); - - let all_peers = peers.iter() - .cloned() - .map(Some) - .chain(std::iter::once(None)); + runtime.spawn(initialize_grandpa(&mut net, peers)); - let mut keystore_paths = Vec::new(); - - let mut voters = Vec::new(); - - for (peer_id, local_key) in all_peers.enumerate() { - let (client, net_service, link) = { - let net = net.lock(); - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - net.peers[peer_id].network_service().clone(), - link, - ) - }; - finality_notifications.push( - client.finality_notification_stream() - .take_while(|n| future::ready(n.header.number() < &20)) - .for_each(move |_| future::ready(())) - ); - - let keystore = if let Some(local_key) = local_key { - let (keystore, keystore_path) = create_keystore(local_key); - keystore_paths.push(keystore_path); - Some(keystore) - } else { - None - }; + runtime.spawn({ + let peer_id = 3; + let net_service = net.peers[peer_id].network_service().clone(); + let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); let grandpa_params = GrandpaParams { config: Config { gossip_duration: TEST_GOSSIP_DURATION, justification_period: 32, - keystore, + keystore: None, name: Some(format!("peer#{}", peer_id)), is_authority: true, observer_enabled: true, @@ -506,11 +448,21 @@ fn finalize_3_voters_1_full_observer() { shared_voter_state: SharedVoterState::empty(), }; - voters.push(run_grandpa_voter(grandpa_params).expect("all 
in order with client and network")); - } + run_grandpa_voter(grandpa_params).expect("all in order with client and network") + }); - for voter in voters { - runtime.spawn(voter); + net.peer(0).push_blocks(20, false); + + let net = Arc::new(Mutex::new(net)); + let mut finality_notifications = Vec::new(); + + for peer_id in 0..4 { + let client = net.lock().peers[peer_id].client().clone(); + finality_notifications.push( + client.finality_notification_stream() + .take_while(|n| future::ready(n.header.number() < &20)) + .for_each(move |_| future::ready(())) + ); } // wait for all finalized on each. @@ -543,6 +495,13 @@ fn transition_3_voters_twice_1_full_observer() { let observer = &[Ed25519Keyring::One]; + let all_peers = peers_a.iter() + .chain(peers_b) + .chain(peers_c) + .chain(observer) + .cloned() + .collect::>(); // deduplicate + let genesis_voters = make_ids(peers_a); let api = TestApi::new(genesis_voters); @@ -550,6 +509,41 @@ fn transition_3_voters_twice_1_full_observer() { let mut runtime = Runtime::new().unwrap(); + let mut keystore_paths = Vec::new(); + let mut voters = Vec::new(); + for (peer_id, local_key) in all_peers.clone().into_iter().enumerate() { + let (keystore, keystore_path) = create_keystore(local_key); + keystore_paths.push(keystore_path); + + let (net_service, link) = { + let net = net.lock(); + let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + ( + net.peers[peer_id].network_service().clone(), + link, + ) + }; + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", peer_id)), + is_authority: true, + observer_enabled: true, + }, + link, + network: net_service, + telemetry_on_connect: None, + voting_rule: (), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + }; + + voters.push(run_grandpa_voter(grandpa_params).expect("all in order with client and network")); + } + net.lock().peer(0).push_blocks(1, false); net.lock().block_until_sync(); @@ -615,30 +609,13 @@ fn transition_3_voters_twice_1_full_observer() { } let mut finality_notifications = Vec::new(); - let all_peers = peers_a.iter() - .chain(peers_b) - .chain(peers_c) - .chain(observer) - .cloned() - .collect::>() // deduplicate - .into_iter() - .enumerate(); - let mut keystore_paths = Vec::new(); - for (peer_id, local_key) in all_peers { - let (keystore, keystore_path) = create_keystore(local_key); - keystore_paths.push(keystore_path); - - let (client, net_service, link) = { - let net = net.lock(); - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - net.peers[peer_id].network_service().clone(), - link, - ) - }; + for voter in voters { + runtime.spawn(voter); + } + for (peer_id, _) in all_peers.into_iter().enumerate() { + let client = net.lock().peers[peer_id].client().clone(); finality_notifications.push( client.finality_notification_stream() .take_while(|n| future::ready(n.header.number() < &30)) @@ -651,26 +628,6 @@ fn transition_3_voters_twice_1_full_observer() { assert_eq!(set.pending_changes().count(), 0); }) ); - - let grandpa_params = GrandpaParams { - config: Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: Some(keystore), - name: Some(format!("peer#{}", peer_id)), - is_authority: true, - observer_enabled: true, - }, - link: link, - network: net_service, - telemetry_on_connect: 
None, - voting_rule: (), - prometheus_registry: None, - shared_voter_state: SharedVoterState::empty(), - }; - let voter = run_grandpa_voter(grandpa_params).expect("all in order with client and network"); - - runtime.spawn(voter); } // wait for all finalized on each. @@ -679,24 +636,6 @@ fn transition_3_voters_twice_1_full_observer() { block_until_complete(wait_for, &net, &mut runtime); } -#[test] -fn justification_is_emitted_when_consensus_data_changes() { - let mut runtime = Runtime::new().unwrap(); - let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 3); - - // import block#1 WITH consensus data change - let new_authorities = vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])]; - net.peer(0).push_authorities_change_block(new_authorities); - net.block_until_sync(); - let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, 1, net.clone(), peers); - - // ... and check that there's justification for block#1 - assert!(net.lock().peer(0).client().justification(&BlockId::Number(1)).unwrap().is_some(), - "Missing justification for block#1"); -} - #[test] fn justification_is_generated_periodically() { let mut runtime = Runtime::new().unwrap(); @@ -704,6 +643,7 @@ fn justification_is_generated_periodically() { let voters = make_ids(peers); let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + runtime.spawn(initialize_grandpa(&mut net, peers)); net.peer(0).push_blocks(32, false); net.block_until_sync(); @@ -717,25 +657,6 @@ fn justification_is_generated_periodically() { } } -#[test] -fn consensus_changes_works() { - let mut changes = ConsensusChanges::::empty(); - - // pending changes are not finalized - changes.note_change((10, H256::from_low_u64_be(1))); - assert_eq!(changes.finalize((5, H256::from_low_u64_be(5)), |_| Ok(None)).unwrap(), (false, false)); - - // no change is selected from competing pending changes - changes.note_change((1, H256::from_low_u64_be(1))); - changes.note_change((1, H256::from_low_u64_be(101))); - assert_eq!(changes.finalize((10, H256::from_low_u64_be(10)), |_| Ok(Some(H256::from_low_u64_be(1001)))).unwrap(), (true, false)); - - // change is selected from competing pending changes - changes.note_change((1, H256::from_low_u64_be(1))); - changes.note_change((1, H256::from_low_u64_be(101))); - assert_eq!(changes.finalize((10, H256::from_low_u64_be(10)), |_| Ok(Some(H256::from_low_u64_be(1)))).unwrap(), (true, true)); -} - #[test] fn sync_justifications_on_change_blocks() { let mut runtime = Runtime::new().unwrap(); @@ -746,6 +667,7 @@ fn sync_justifications_on_change_blocks() { // 4 peers, 3 of them are authorities and participate in grandpa let api = TestApi::new(voters); let mut net = GrandpaTestNet::new(api, 4); + let voters = initialize_grandpa(&mut net, peers_a); // add 20 blocks net.peer(0).push_blocks(20, false); @@ -770,6 +692,7 @@ fn sync_justifications_on_change_blocks() { } let net = Arc::new(Mutex::new(net)); + runtime.spawn(voters); run_to_completion(&mut runtime, 25, net.clone(), peers_a); // the first 3 peers are grandpa voters and therefore have already finalized @@ -807,6 +730,7 @@ fn finalizes_multiple_pending_changes_in_order() { // 6 peers, 3 of them are authorities and participate in grandpa from genesis let api = TestApi::new(genesis_voters); let mut net = GrandpaTestNet::new(api, 6); + runtime.spawn(initialize_grandpa(&mut net, all_peers)); // add 20 blocks net.peer(0).push_blocks(20, false); @@ -865,7 +789,8 @@ fn 
force_change_to_new_set() { let api = TestApi::new(make_ids(genesis_authorities)); let voters = make_ids(peers_a); - let net = GrandpaTestNet::new(api, 3); + let mut net = GrandpaTestNet::new(api, 3); + let voters_future = initialize_grandpa(&mut net, peers_a); let net = Arc::new(Mutex::new(net)); net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { @@ -903,6 +828,7 @@ fn force_change_to_new_set() { // it will only finalize if the forced transition happens. // we add_blocks after the voters are spawned because otherwise // the link-halves have the wrong AuthoritySet + runtime.spawn(voters_future); run_to_completion(&mut runtime, 25, net, peers_a); } @@ -944,7 +870,6 @@ fn allows_reimporting_change_blocks() { needs_justification: true, clear_justification_requests: false, bad_justification: false, - needs_finality_proof: false, is_new_best: true, header_only: false, }), @@ -1011,10 +936,10 @@ fn test_bad_justification() { fn voter_persists_its_votes() { use std::sync::atomic::{AtomicUsize, Ordering}; use futures::future; - use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); + let mut keystore_paths = Vec::new(); // we have two authorities but we'll only be running the voter for alice // we are going to be listening for the prevotes it casts @@ -1023,152 +948,150 @@ fn voter_persists_its_votes() { // alice has a chain with 20 blocks let mut net = GrandpaTestNet::new(TestApi::new(voters.clone()), 2); - net.peer(0).push_blocks(20, false); - net.block_until_sync(); - - assert_eq!(net.peer(0).client().info().best_number, 20, - "Peer #{} failed to sync", 0); - - - let peer = net.peer(0); - let client = peer.client().clone(); - let net = Arc::new(Mutex::new(net)); - - // channel between the voter and the main controller. - // sending a message on the `voter_tx` restarts the voter. - let (voter_tx, voter_rx) = tracing_unbounded::<()>(""); - - let mut keystore_paths = Vec::new(); - - // startup a grandpa voter for alice but also listen for messages on a - // channel. whenever a message is received the voter is restarted. when the - // sender is dropped the voter is stopped. 
- { - let (keystore, keystore_path) = create_keystore(peers[0]); - keystore_paths.push(keystore_path); - - struct ResettableVoter { - voter: Pin + Send + Unpin>>, - voter_rx: TracingUnboundedReceiver<()>, - net: Arc>, - client: PeersClient, - keystore: SyncCryptoStorePtr, - } - - impl Future for ResettableVoter { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let this = Pin::into_inner(self); - - if let Poll::Ready(()) = Pin::new(&mut this.voter).poll(cx) { - panic!("error in the voter"); - } - - match Pin::new(&mut this.voter_rx).poll_next(cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(None) => return Poll::Ready(()), - Poll::Ready(Some(())) => { - let (_block_import, _, _, _, link) = - this.net.lock() - .make_block_import::< - TransactionFor - >(this.client.clone()); - let link = link.lock().take().unwrap(); - - let grandpa_params = GrandpaParams { - config: Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: Some(this.keystore.clone()), - name: Some(format!("peer#{}", 0)), - is_authority: true, - observer_enabled: true, - }, - link, - network: this.net.lock().peers[0].network_service().clone(), - telemetry_on_connect: None, - voting_rule: VotingRulesBuilder::default().build(), - prometheus_registry: None, - shared_voter_state: SharedVoterState::empty(), - }; - - let voter = run_grandpa_voter(grandpa_params) - .expect("all in order with client and network") - .map(move |r| { - // we need to keep the block_import alive since it owns the - // sender for the voter commands channel, if that gets dropped - // then the voter will stop - drop(_block_import); - r - }); - - this.voter = Box::pin(voter); - // notify current task in order to poll the voter - cx.waker().wake_by_ref(); - } - }; - - Poll::Pending - } - } - - // we create a "dummy" voter by setting it to `pending` and triggering the `tx`. - // this way, the `ResettableVoter` will reset its `voter` field to a value ASAP. - voter_tx.unbounded_send(()).unwrap(); - runtime.spawn(ResettableVoter { - voter: Box::pin(futures::future::pending()), - voter_rx, - net: net.clone(), - client: client.clone(), - keystore, - }); - } - - let (exit_tx, exit_rx) = futures::channel::oneshot::channel::<()>(); // create the communication layer for bob, but don't start any // voter. instead we'll listen for the prevote that alice casts // and cast our own manually - { + let bob_keystore = { let (keystore, keystore_path) = create_keystore(peers[1]); keystore_paths.push(keystore_path); - + keystore + }; + let bob_network = { let config = Config { gossip_duration: TEST_GOSSIP_DURATION, justification_period: 32, - keystore: Some(keystore.clone()), + keystore: Some(bob_keystore.clone()), name: Some(format!("peer#{}", 1)), is_authority: true, observer_enabled: true, }; let set_state = { - let (_, _, _, _, link) = net.lock() + let bob_client = net.peer(1).client().clone(); + let (_, _, link) = net .make_block_import::< TransactionFor - >(client); + >(bob_client); let LinkHalf { persistent_data, .. } = link.lock().take().unwrap(); let PersistentData { set_state, .. } = persistent_data; set_state }; - let network = communication::NetworkBridge::new( - net.lock().peers[1].network_service().clone(), + communication::NetworkBridge::new( + net.peers[1].network_service().clone(), config.clone(), set_state, None, - ); + ) + }; + + // spawn two voters for alice. + // half-way through the test, we stop one and start the other. 
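A standalone sketch, not part of the diff: the test code below wraps the first Alice voter in future::abortable so it can be stopped half-way through and replaced by a second instance that reuses the same database. The snippet shows only that abort-and-restart mechanism, with a placeholder task instead of a real voter, assuming the futures 0.3 crate.

    use futures::{executor::block_on, future};

    fn main() {
        // First instance, wrapped so it can be stopped from the outside.
        let (first_voter, abort_handle) = future::abortable(async {
            future::pending::<()>().await // stand-in for the long-running voter
        });

        // ... later in the test the first instance is stopped ...
        abort_handle.abort();

        // An aborted future resolves with Err(Aborted); after that a second
        // instance can be spawned against the same state.
        assert!(block_on(first_voter).is_err());
    }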
+ let (alice_voter1, abort) = future::abortable({ + let (keystore, _) = create_keystore(peers[0]); + + let (net_service, link) = { + // temporary needed for some reason + let link = net.peers[0].data.lock().take().expect("link initialized at startup; qed"); + ( + net.peers[0].network_service().clone(), + link, + ) + }; - let (round_rx, round_tx) = network.round_communication( - Some((peers[1].public().into(), keystore).into()), + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", 0)), + is_authority: true, + observer_enabled: true, + }, + link, + network: net_service, + telemetry_on_connect: None, + voting_rule: VotingRulesBuilder::default().build(), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + }; + + run_grandpa_voter(grandpa_params).expect("all in order with client and network") + }); + + fn alice_voter2( + peers: &[Ed25519Keyring], + net: Arc>, + ) -> impl Future + Unpin + Send + 'static { + let (keystore, _) = create_keystore(peers[0]); + let mut net = net.lock(); + + // we add a new peer to the test network and we'll use + // the network service of this new peer + net.add_full_peer(); + let net_service = net.peers[2].network_service().clone(); + // but we'll reuse the client from the first peer (alice_voter1) + // since we want to share the same database, so that we can + // read the persisted state after aborting alice_voter1. + let alice_client = net.peer(0).client().clone(); + + let (_block_import, _, link) = net + .make_block_import::< + TransactionFor + >(alice_client); + let link = link.lock().take().unwrap(); + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", 0)), + is_authority: true, + observer_enabled: true, + }, + link, + network: net_service, + telemetry_on_connect: None, + voting_rule: VotingRulesBuilder::default().build(), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + }; + + run_grandpa_voter(grandpa_params) + .expect("all in order with client and network") + .map(move |r| { + // we need to keep the block_import alive since it owns the + // sender for the voter commands channel, if that gets dropped + // then the voter will stop + drop(_block_import); + r + }) + }; + + runtime.spawn(alice_voter1); + + net.peer(0).push_blocks(20, false); + net.block_until_sync(); + + assert_eq!(net.peer(0).client().info().best_number, 20, + "Peer #{} failed to sync", 0); + + let net = Arc::new(Mutex::new(net)); + + let (exit_tx, exit_rx) = futures::channel::oneshot::channel::<()>(); + + { + let (round_rx, round_tx) = bob_network.round_communication( + Some((peers[1].public().into(), bob_keystore).into()), communication::Round(1), communication::SetId(0), Arc::new(VoterSet::new(voters).unwrap()), HasVoted::No, ); - runtime.spawn(network); + runtime.spawn(bob_network); let round_tx = Arc::new(Mutex::new(round_tx)); let exit_tx = Arc::new(Mutex::new(Some(exit_tx))); @@ -1176,13 +1099,15 @@ fn voter_persists_its_votes() { let net = net.clone(); let state = Arc::new(AtomicUsize::new(0)); + let runtime_handle = runtime.handle().clone(); runtime.spawn(round_rx.for_each(move |signed| { let net2 = net.clone(); let net = net.clone(); - let voter_tx = voter_tx.clone(); + let abort = abort.clone(); let round_tx = round_tx.clone(); let state = state.clone(); let 
exit_tx = exit_tx.clone(); + let runtime_handle = runtime_handle.clone(); async move { if state.compare_and_swap(0, 1, Ordering::SeqCst) == 0 { @@ -1194,7 +1119,7 @@ fn voter_persists_its_votes() { // its chain has 20 blocks and the voter targets 3/4 of the // unfinalized chain, so the vote should be for block 15 - assert!(prevote.target_number == 15); + assert_eq!(prevote.target_number, 15); // we push 20 more blocks to alice's chain net.lock().peer(0).push_blocks(20, false); @@ -1217,7 +1142,8 @@ fn voter_persists_its_votes() { net.lock().peer(0).client().as_full().unwrap().hash(30).unwrap().unwrap(); // we restart alice's voter - voter_tx.unbounded_send(()).unwrap(); + abort.abort(); + runtime_handle.spawn(alice_voter2(peers, net.clone())); // and we push our own prevote for block 30 let prevote = finality_grandpa::Prevote { @@ -1274,6 +1200,19 @@ fn finalize_3_voters_1_light_observer() { let voters = make_ids(authorities); let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); + let voters = initialize_grandpa(&mut net, authorities); + let observer = observer::run_grandpa_observer( + Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: None, + name: Some("observer".to_string()), + is_authority: false, + observer_enabled: true, + }, + net.peers[3].data.lock().take().expect("link initialized at startup; qed"), + net.peers[3].network_service().clone(), + ).unwrap(); net.peer(0).push_blocks(20, false); net.block_until_sync(); @@ -1283,126 +1222,10 @@ fn finalize_3_voters_1_light_observer() { } let net = Arc::new(Mutex::new(net)); - let link = net.lock().peer(3).data.lock().take().expect("link initialized on startup; qed"); - let finality_notifications = net.lock().peer(3).client().finality_notification_stream() - .take_while(|n| { - future::ready(n.header.number() < &20) - }) - .collect::>(); - - run_to_completion_with(&mut runtime, 20, net.clone(), authorities, |executor| { - executor.spawn( - observer::run_grandpa_observer( - Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: None, - name: Some("observer".to_string()), - is_authority: false, - observer_enabled: true, - }, - link, - net.lock().peers[3].network_service().clone(), - ).unwrap() - ); - - Some(Box::pin(finality_notifications.map(|_| ()))) - }); -} - -#[test] -fn finality_proof_is_fetched_by_light_client_when_consensus_data_changes() { - sp_tracing::try_init_simple(); - let mut runtime = Runtime::new().unwrap(); - - let peers = &[Ed25519Keyring::Alice]; - let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 1); - net.add_light_peer(); - - // import block#1 WITH consensus data change. 
Light client ignores justification - // && instead fetches finality proof for block #1 - net.peer(0).push_authorities_change_block(vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])]); - let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, 1, net.clone(), peers); - net.lock().block_until_sync(); - - // check that the block#1 is finalized on light client - runtime.block_on(futures::future::poll_fn(move |cx| { - if net.lock().peer(1).client().info().finalized_number == 1 { - Poll::Ready(()) - } else { - net.lock().poll(cx); - Poll::Pending - } - })); -} - -#[test] -fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_different() { - // for debug: to ensure that without forced change light client will sync finality proof - const FORCE_CHANGE: bool = true; - - sp_tracing::try_init_simple(); - let mut runtime = Runtime::new().unwrap(); - - // two of these guys are offline. - let genesis_authorities = if FORCE_CHANGE { - vec![ - Ed25519Keyring::Alice, - Ed25519Keyring::Bob, - Ed25519Keyring::Charlie, - Ed25519Keyring::One, - Ed25519Keyring::Two, - ] - } else { - vec![ - Ed25519Keyring::Alice, - Ed25519Keyring::Bob, - Ed25519Keyring::Charlie, - ] - }; - let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let api = TestApi::new(make_ids(&genesis_authorities)); - - let voters = make_ids(peers_a); - let net = GrandpaTestNet::new(api, 3); - let net = Arc::new(Mutex::new(net)); - - // best is #1 - net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - // add a forced transition at block 5. - let mut block = builder.build().unwrap().block; - if FORCE_CHANGE { - add_forced_change(&mut block, 0, ScheduledChange { - next_authorities: voters.clone(), - delay: 3, - }); - } - block - }); - - // ensure block#10 enacts authorities set change => justification is generated - // normally it will reach light client, but because of the forced change, it will not - net.lock().peer(0).push_blocks(8, false); // best is #9 - net.lock().peer(0).push_authorities_change_block( - vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])] - ); // #10 - net.lock().peer(0).push_blocks(1, false); // best is #11 - net.lock().block_until_sync(); - - // finalize block #11 on full clients - run_to_completion(&mut runtime, 11, net.clone(), peers_a); - - // request finalization by light client - net.lock().add_light_peer(); - net.lock().block_until_sync(); - - // check block, finalized on light client - assert_eq!( - net.lock().peer(3).client().info().finalized_number, - if FORCE_CHANGE { 0 } else { 10 }, - ); + runtime.spawn(voters); + runtime.spawn(observer); + run_to_completion(&mut runtime, 20, net.clone(), authorities); } #[test] @@ -1413,9 +1236,7 @@ fn voter_catches_up_to_latest_round_when_behind() { let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); - net.peer(0).push_blocks(50, false); - net.block_until_sync(); + let net = GrandpaTestNet::new(TestApi::new(voters), 2); let net = Arc::new(Mutex::new(net)); let mut finality_notifications = Vec::new(); @@ -1468,6 +1289,9 @@ fn voter_catches_up_to_latest_round_when_behind() { runtime.spawn(voter); } + net.lock().peer(0).push_blocks(50, false); + net.lock().block_until_sync(); + // wait for them to finalize block 50. since they'll vote on 3/4 of the // unfinalized chain it will take at least 4 rounds to do it. 
let wait_for_finality = ::futures::future::join_all(finality_notifications); @@ -1479,18 +1303,15 @@ fn voter_catches_up_to_latest_round_when_behind() { let runtime = runtime.handle().clone(); wait_for_finality.then(move |_| { - let peer_id = 2; + net.lock().add_full_peer(); + let link = { let net = net.lock(); - let mut link = net.peers[peer_id].data.lock(); + let mut link = net.peers[2].data.lock(); link.take().expect("link initialized at startup; qed") }; - let set_state = link.persistent_data.set_state.clone(); - - let voter = voter(None, peer_id, link, net); - - runtime.spawn(voter); + runtime.spawn(voter(None, 2, link, net.clone())); let start_time = std::time::Instant::now(); let timeout = Duration::from_secs(5 * 60); @@ -1540,7 +1361,6 @@ where { let PersistentData { ref authority_set, - ref consensus_changes, ref set_state, .. } = link.persistent_data; @@ -1564,7 +1384,6 @@ where Environment { authority_set: authority_set.clone(), config: config.clone(), - consensus_changes: consensus_changes.clone(), client: link.client.clone(), select_chain: link.select_chain.clone(), set_id: authority_set.set_id(), diff --git a/client/keystore/src/local.rs b/client/keystore/src/local.rs index e0b95a08d5caf3747ac7c002c789bc86621d9c98..a31e3e1f1e402df5bae884de80993d1f1fdcb630 100644 --- a/client/keystore/src/local.rs +++ b/client/keystore/src/local.rs @@ -329,7 +329,7 @@ impl KeystoreInner { /// Open the store at the given path. /// /// Optionally takes a password that will be used to encrypt/decrypt the keys. - pub fn open>(path: T, password: Option) -> Result { + fn open>(path: T, password: Option) -> Result { let path = path.into(); fs::create_dir_all(&path)?; @@ -345,7 +345,7 @@ impl KeystoreInner { } /// Create a new in-memory store. - pub fn new_in_memory() -> Self { + fn new_in_memory() -> Self { Self { path: None, additional: HashMap::new(), @@ -373,8 +373,8 @@ impl KeystoreInner { /// Insert a new key with anonymous crypto. /// - /// Places it into the file system store. - pub fn insert_unknown(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<()> { + /// Places it into the file system store, if a path is configured. + fn insert_unknown(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<()> { if let Some(path) = self.key_file_path(public, key_type) { let mut file = File::create(path).map_err(Error::Io)?; serde_json::to_writer(&file, &suri).map_err(Error::Json)?; @@ -385,13 +385,16 @@ impl KeystoreInner { /// Generate a new key. /// - /// Places it into the file system store. - pub fn generate_by_type(&self, key_type: KeyTypeId) -> Result { + /// Places it into the file system store, if a path is configured. Otherwise insert + /// it into the memory cache only. + fn generate_by_type(&mut self, key_type: KeyTypeId) -> Result { let (pair, phrase, _) = Pair::generate_with_phrase(self.password()); if let Some(path) = self.key_file_path(pair.public().as_slice(), key_type) { let mut file = File::create(path)?; serde_json::to_writer(&file, &phrase)?; file.flush()?; + } else { + self.insert_ephemeral_pair(&pair, &phrase, key_type); } Ok(pair) } @@ -399,7 +402,7 @@ impl KeystoreInner { /// Create a new key from seed. /// /// Does not place it into the file system store. - pub fn insert_ephemeral_from_seed_by_type( + fn insert_ephemeral_from_seed_by_type( &mut self, seed: &str, key_type: KeyTypeId, @@ -422,7 +425,7 @@ impl KeystoreInner { } /// Get a key pair for the given public key and key type. 
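A standalone sketch, not part of the diff: the generate_by_type change above makes a freshly generated key go to disk when the keystore has a path, and into an in-memory cache otherwise. Store and generate below are invented stand-ins using only the standard library.

    use std::{collections::HashMap, fs, io, path::PathBuf};

    struct Store {
        path: Option<PathBuf>,
        in_memory: HashMap<String, String>, // public key -> secret phrase
    }

    impl Store {
        fn generate(&mut self, public: &str, phrase: &str) -> io::Result<()> {
            match &self.path {
                // File-backed store: persist the phrase so it survives a restart.
                Some(dir) => fs::write(dir.join(public), phrase),
                // Purely in-memory store: keep the pair so it can still be fetched later.
                None => {
                    self.in_memory.insert(public.to_owned(), phrase.to_owned());
                    Ok(())
                }
            }
        }
    }

    fn main() -> io::Result<()> {
        let mut store = Store { path: None, in_memory: HashMap::new() };
        store.generate("alice-public", "alice phrase")?;
        assert!(store.in_memory.contains_key("alice-public"));
        Ok(())
    }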
- pub fn key_pair_by_type(&self, + fn key_pair_by_type(&self, public: &Pair::Public, key_type: KeyTypeId, ) -> Result { @@ -501,6 +504,8 @@ mod tests { str::FromStr, }; + const TEST_KEY_TYPE: KeyTypeId = KeyTypeId(*b"test"); + impl KeystoreInner { fn insert_ephemeral_from_seed(&mut self, seed: &str) -> Result { self.insert_ephemeral_from_seed_by_type::(seed, Pair::ID).map(Into::into) @@ -515,7 +520,7 @@ mod tests { }) } - fn generate(&self) -> Result { + fn generate(&mut self) -> Result { self.generate_by_type::(Pair::ID).map(Into::into) } } @@ -523,7 +528,7 @@ mod tests { #[test] fn basic_store() { let temp_dir = TempDir::new().unwrap(); - let store = KeystoreInner::open(temp_dir.path(), None).unwrap(); + let mut store = KeystoreInner::open(temp_dir.path(), None).unwrap(); assert!(store.public_keys::().unwrap().is_empty()); @@ -558,7 +563,7 @@ mod tests { fn password_being_used() { let password = String::from("password"); let temp_dir = TempDir::new().unwrap(); - let store = KeystoreInner::open( + let mut store = KeystoreInner::open( temp_dir.path(), Some(FromStr::from_str(password.as_str()).unwrap()), ).unwrap(); @@ -640,4 +645,27 @@ mod tests { SyncCryptoStore::sr25519_public_keys(&store, SR25519).is_empty(), ); } + + #[test] + fn generate_with_seed_is_not_stored() { + let temp_dir = TempDir::new().unwrap(); + let store = LocalKeystore::open(temp_dir.path(), None).unwrap(); + let _alice_tmp_key = SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, Some("//Alice")).unwrap(); + + assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 1); + + drop(store); + let store = LocalKeystore::open(temp_dir.path(), None).unwrap(); + assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 0); + } + + #[test] + fn generate_can_be_fetched_in_memory() { + let store = LocalKeystore::in_memory(); + SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, Some("//Alice")).unwrap(); + + assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 1); + SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, None).unwrap(); + assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 2); + } } diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index be7953e528bd828a31ffb1c40ee8388b9add7cbe..74e1d613bcf56ae80b652cef2358fb45927b850c 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -441,14 +441,14 @@ impl StateBackend for GenesisOrUnavailableState } } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, action: A, ) { match *self { GenesisOrUnavailableState::Genesis(ref state) => - state.for_keys_in_child_storage(child_info, action), + state.apply_to_child_keys_while(child_info, action), GenesisOrUnavailableState::Unavailable => (), } } diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs index fa0f02cd5aed914e9b93608f73e08260339bed00..458ea2bd6b844b5b430f08823f94f283a785ad87 100644 --- a/client/light/src/call_executor.rs +++ b/client/light/src/call_executor.rs @@ -276,7 +276,8 @@ pub fn check_execution_proof_with_make_header( // TODO: Remove when solved: https://github.com/paritytech/substrate/issues/5047 let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_backend); - let runtime_code = backend_runtime_code.runtime_code()?; + let runtime_code = backend_runtime_code.runtime_code() + .map_err(|_e| ClientError::RuntimeCodeMissing)?; 
execution_proof_check_on_trie_backend::( &trie_backend, diff --git a/client/light/src/fetcher.rs b/client/light/src/fetcher.rs index 33113c2fc7df0de727d8cf0db95c42a51b74af12..60fce87b8d0c26ecddbdf22d2ebd09e08060a696 100644 --- a/client/light/src/fetcher.rs +++ b/client/light/src/fetcher.rs @@ -239,7 +239,7 @@ impl FetchChecker for LightDataChecker convert_hash(request.header.state_root()), remote_proof, request.keys.iter(), - ).map_err(Into::into) + ).map_err(|e| ClientError::from(e)) } fn check_read_child_proof( @@ -249,14 +249,14 @@ impl FetchChecker for LightDataChecker ) -> ClientResult, Option>>> { let child_info = match ChildType::from_prefixed_key(&request.storage_key) { Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err("Invalid child type".into()), + None => return Err(ClientError::InvalidChildType), }; read_child_proof_check::( convert_hash(request.header.state_root()), remote_proof, &child_info, request.keys.iter(), - ).map_err(Into::into) + ).map_err(|e| ClientError::from(e)) } fn check_execution_proof( @@ -292,10 +292,10 @@ impl FetchChecker for LightDataChecker if *request.header.extrinsics_root() == extrinsics_root { Ok(body) } else { - Err(format!("RemoteBodyRequest: invalid extrinsics root expected: {} but got {}", - *request.header.extrinsics_root(), - extrinsics_root, - ).into()) + Err(ClientError::ExtrinsicRootInvalid { + received: request.header.extrinsics_root().to_string(), + expected: extrinsics_root.to_string(), + }) } } diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index c120ff515c7adecda482de6f37ee0cb524092f0d..5b82bd679c0112b6f23cc61c4245d419dd1e757c 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.30.1", default-features = false } +libp2p = { version = "0.32.2", default-features = false } log = "0.4.8" -lru = "0.4.3" +lru = "0.6.1" sc-network = { version = "0.8.0", path = "../network" } sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } wasm-timer = "0.2" diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 98ada69590f1c5c7387675aab89da265fd522502..4deaad6d748fd13f7dc11f3f8d41fcfe77976862 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -72,11 +72,7 @@ impl GossipEngine { validator: Arc>, ) -> Self where B: 'static { let protocol = protocol.into(); - - // We grab the event stream before registering the notifications protocol, otherwise we - // might miss events. let network_event_stream = network.event_stream(); - network.register_notifications_protocol(protocol.clone()); GossipEngine { state_machine: ConsensusGossip::new(validator, protocol.clone()), @@ -335,8 +331,6 @@ mod tests { unimplemented!(); } - fn register_notifications_protocol(&self, _: Cow<'static, str>) {} - fn announce(&self, _: B::Hash, _: Vec) { unimplemented!(); } diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 09e946d1a1ea9cbf069531a603534351e96800c8..2b333610223e2a19c0c4553b8973eb77e65919ee 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -81,14 +81,6 @@ pub trait Network { /// Send a notification to a peer. fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec); - /// Registers a notifications protocol. 
- /// - /// See the documentation of [`NetworkService:register_notifications_protocol`] for more information. - fn register_notifications_protocol( - &self, - protocol: Cow<'static, str>, - ); - /// Notify everyone we're connected to that we have the given block. /// /// Note: this method isn't strictly related to gossiping and should eventually be moved @@ -113,13 +105,6 @@ impl Network for Arc> { NetworkService::write_notification(self, who, protocol, message) } - fn register_notifications_protocol( - &self, - protocol: Cow<'static, str>, - ) { - NetworkService::register_notifications_protocol(self, protocol) - } - fn announce(&self, block: B::Hash, associated_data: Vec) { NetworkService::announce_block(self, block, associated_data) } diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 8bd6d9df01911eba80b8befdbb1690f3643358d9..88f9d48375dec9e7c834651b87666fd2c7ad9717 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -503,8 +503,6 @@ mod tests { unimplemented!(); } - fn register_notifications_protocol(&self, _: Cow<'static, str>) {} - fn announce(&self, _: B::Hash, _: Vec) { unimplemented!(); } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 6b66fd0cdee654d5caf1e37439f8f7ca09303905..b7cb1512dd45a2d09f5a3f0cb0298cd4f4d5c1f4 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -20,7 +20,7 @@ prost-build = "0.6.1" async-trait = "0.1" async-std = "1.6.5" bitflags = "1.2.0" -bs58 = "0.3.1" +bs58 = "0.4.0" bytes = "0.5.0" codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } derive_more = "0.99.2" @@ -36,9 +36,9 @@ ip_network = "0.3.4" linked-hash-map = "0.5.2" linked_hash_set = "0.1.3" log = "0.4.8" -lru = "0.4.0" +lru = "0.6.1" nohash-hasher = "0.2.0" -parking_lot = "0.10.0" +parking_lot = "0.11.1" pin-project = "0.4.6" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0", path = "../../utils/prometheus" } prost = "0.6.1" @@ -50,7 +50,7 @@ serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" slog = { version = "2.5.2", features = ["nested-values"] } slog_derive = "0.2.0" -smallvec = "0.6.10" +smallvec = "1.5.0" sp-arithmetic = { version = "2.0.0", path = "../../primitives/arithmetic" } sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } @@ -58,19 +58,19 @@ sp-core = { version = "2.0.0", path = "../../primitives/core" } sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } sp-utils = { version = "2.0.0", path = "../../primitives/utils" } thiserror = "1" -unsigned-varint = { version = "0.4.0", features = ["futures", "futures-codec"] } +unsigned-varint = { version = "0.5.0", features = ["futures", "futures-codec"] } void = "1.0.2" wasm-timer = "0.2" -zeroize = "1.0.0" +zeroize = "1.2.0" [dependencies.libp2p] -version = "0.30.1" +version = "0.32.2" default-features = false -features = ["identify", "kad", "mdns-async-std", "mplex", "noise", "ping", "request-response", "tcp-async-std", "websocket", "yamux"] +features = ["identify", "kad", "mdns", "mplex", "noise", "ping", "request-response", "tcp-async-std", "websocket", "yamux"] [dev-dependencies] assert_matches = "1.3" -libp2p = { version = "0.30.1", default-features = false } +libp2p = { version = "0.32.2", default-features = false } quickcheck = "0.9.0" rand = "0.7.2" sp-keyring = { 
version = "2.0.0", path = "../../primitives/keyring" } diff --git a/client/network/README.md b/client/network/README.md index e0bd691043bee3184be7745144fc06665cade72d..914720f53e2a989d57e6c75042d50570a2b714e5 100644 --- a/client/network/README.md +++ b/client/network/README.md @@ -120,8 +120,8 @@ bytes. block announces are pushed to other nodes. The handshake is empty on both sides. The message format is a SCALE-encoded tuple containing a block header followed with an opaque list of bytes containing some data associated with this block announcement, e.g. a candidate message. -- Notifications protocols that are registered using the `register_notifications_protocol` -method. For example: `/paritytech/grandpa/1`. See below for more information. +- Notifications protocols that are registered using `NetworkConfiguration::notifications_protocols`. +For example: `/paritytech/grandpa/1`. See below for more information. ## The legacy Substrate substream @@ -223,4 +223,4 @@ dispatching a background task with the [`NetworkWorker`]. More precise usage details are still being worked on and will likely change in the future. -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file +License: GPL-3.0-or-later WITH Classpath-exception-2.0 diff --git a/client/network/build.rs b/client/network/build.rs index 8ed460f163eb4f9c0fbab6949bb0028b5e9dd808..2ccc72d99df9658b16d1c4c2be535e4d12993638 100644 --- a/client/network/build.rs +++ b/client/network/build.rs @@ -1,6 +1,5 @@ const PROTOS: &[&str] = &[ "src/schema/api.v1.proto", - "src/schema/finality.v1.proto", "src/schema/light.v1.proto" ]; diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 41723d9068c2e8411ae486db4d605af3ee364916..8b9e321ca599bcda0022b6b36bfcfd623cf72739 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -15,9 +15,9 @@ // along with Substrate. If not, see . use crate::{ - config::{ProtocolId, Role}, block_requests, light_client_handler, finality_requests, + config::{ProtocolId, Role}, block_requests, light_client_handler, peer_info, request_responses, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, - protocol::{message::{self, Roles}, CustomMessageOutcome, NotificationsSink, Protocol}, + protocol::{message::Roles, CustomMessageOutcome, NotificationsSink, Protocol}, ObservedRole, DhtEvent, ExHashT, }; @@ -58,8 +58,6 @@ pub struct Behaviour { request_responses: request_responses::RequestResponsesBehaviour, /// Block request handling. block_requests: block_requests::BlockRequests, - /// Finality proof request handling. - finality_proof_requests: finality_requests::FinalityProofRequests, /// Light client request handling. light_client_handler: light_client_handler::LightClientHandler, @@ -76,7 +74,6 @@ pub struct Behaviour { pub enum BehaviourOut { BlockImport(BlockOrigin, Vec>), JustificationImport(Origin, B::Hash, NumberFor, Justification), - FinalityProofImport(Origin, B::Hash, NumberFor, Vec), /// Started a random iterative Kademlia discovery query. RandomKademliaStarted(ProtocolId), @@ -91,7 +88,7 @@ pub enum BehaviourOut { protocol: Cow<'static, str>, /// If `Ok`, contains the time elapsed between when we received the request and when we /// sent back the response. If `Err`, the error that happened. - result: Result, + result: Result, ResponseFailure>, }, /// A request initiated using [`Behaviour::send_request`] has succeeded or failed. 
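With `register_notifications_protocol` removed from the gossip `Network` trait above, notification protocols are declared once, up front, through `NetworkConfiguration::notifications_protocols` (see the README hunk above). A hedged sketch of the corresponding downstream change; how the `NetworkConfiguration` itself is built is left out, and only the field named in this diff is used:

// Before (removed by this diff): registered at runtime on the service.
// network.register_notifications_protocol("/paritytech/grandpa/1".into());

// After: listed in the configuration before the network worker is constructed.
net_config.notifications_protocols.push(std::borrow::Cow::Borrowed("/paritytech/grandpa/1"));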
@@ -182,7 +179,6 @@ impl Behaviour { user_agent: String, local_public_key: PublicKey, block_requests: block_requests::BlockRequests, - finality_proof_requests: finality_requests::FinalityProofRequests, light_client_handler: light_client_handler::LightClientHandler, disco_config: DiscoveryConfig, request_response_protocols: Vec, @@ -194,7 +190,6 @@ impl Behaviour { request_responses: request_responses::RequestResponsesBehaviour::new(request_response_protocols.into_iter())?, block_requests, - finality_proof_requests, light_client_handler, events: VecDeque::new(), role, @@ -334,8 +329,6 @@ Behaviour { self.events.push_back(BehaviourOut::BlockImport(origin, blocks)), CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => self.events.push_back(BehaviourOut::JustificationImport(origin, hash, nb, justification)), - CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) => - self.events.push_back(BehaviourOut::FinalityProofImport(origin, hash, nb, proof)), CustomMessageOutcome::BlockRequest { target, request } => { match self.block_requests.send_request(&target, request) { block_requests::SendRequestOutcome::Ok => { @@ -359,9 +352,6 @@ Behaviour { block_requests::SendRequestOutcome::EncodeError(_) => {}, } }, - CustomMessageOutcome::FinalityProofRequest { target, block_hash, request } => { - self.finality_proof_requests.send_request(&target, block_hash, request); - }, CustomMessageOutcome::NotificationStreamOpened { remote, protocols, roles, notifications_sink } => { let role = reported_roles_to_observed_role(&self.role, &remote, roles); for protocol in protocols { @@ -427,7 +417,7 @@ impl NetworkBehaviourEventProcess { @@ -454,26 +444,6 @@ impl NetworkBehaviourEventProcess NetworkBehaviourEventProcess> for Behaviour { - fn inject_event(&mut self, event: finality_requests::Event) { - match event { - finality_requests::Event::Response { peer, block_hash, proof } => { - let response = message::FinalityProofResponse { - id: 0, - block: block_hash, - proof: if !proof.is_empty() { - Some(proof) - } else { - None - }, - }; - let ev = self.substrate.on_finality_proof_response(peer, response); - self.inject_event(ev); - } - } - } -} - impl NetworkBehaviourEventProcess for Behaviour { fn inject_event(&mut self, event: peer_info::PeerInfoEvent) { diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index 20fbe0284397d513a909cbcf72733a265c060214..61d19c10dae513f109f4b29ee457a21d246b02cd 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -32,15 +32,3 @@ impl Client for T T: HeaderBackend + ProofProvider + BlockIdTo + BlockBackend + HeaderMetadata + Send + Sync {} - -/// Finality proof provider. -pub trait FinalityProofProvider: Send + Sync { - /// Prove finality of the block. - fn prove_finality(&self, for_block: Block::Hash, request: &[u8]) -> Result>, Error>; -} - -impl FinalityProofProvider for () { - fn prove_finality(&self, _for_block: Block::Hash, _request: &[u8]) -> Result>, Error> { - Ok(None) - } -} diff --git a/client/network/src/config.rs b/client/network/src/config.rs index db33623a2e3302ec847fe5d9c66cae495ff82864..b7b113dc146926659fe1a3c2383aa92afd7a8eeb 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -21,7 +21,7 @@ //! The [`Params`] struct is the struct that must be passed in order to initialize the networking. //! See the documentation of [`Params`]. 
-pub use crate::chain::{Client, FinalityProofProvider}; +pub use crate::chain::Client; pub use crate::on_demand_layer::{AlwaysBadChecker, OnDemand}; pub use crate::request_responses::{IncomingRequest, ProtocolConfig as RequestResponseConfig}; pub use libp2p::{identity, core::PublicKey, wasm_ext::ExtTransport, build_multiaddr}; @@ -70,17 +70,6 @@ pub struct Params { /// Client that contains the blockchain. pub chain: Arc>, - /// Finality proof provider. - /// - /// This object, if `Some`, is used when a node on the network requests a proof of finality - /// from us. - pub finality_proof_provider: Option>>, - - /// How to build requests for proofs of finality. - /// - /// This object, if `Some`, is used when we need a proof of finality from another node. - pub finality_proof_request_builder: Option>, - /// The `OnDemand` object acts as a "receiver" for block data requests from the client. /// If `Some`, the network worker will process these requests and answer them. /// Normally used only for light clients. @@ -153,25 +142,6 @@ impl fmt::Display for Role { } } -/// Finality proof request builder. -pub trait FinalityProofRequestBuilder: Send { - /// Build data blob, associated with the request. - fn build_request_data(&mut self, hash: &B::Hash) -> Vec; -} - -/// Implementation of `FinalityProofRequestBuilder` that builds a dummy empty request. -#[derive(Debug, Default)] -pub struct DummyFinalityProofRequestBuilder; - -impl FinalityProofRequestBuilder for DummyFinalityProofRequestBuilder { - fn build_request_data(&mut self, _: &B::Hash) -> Vec { - Vec::new() - } -} - -/// Shared finality proof request builder struct used by the queue. -pub type BoxFinalityProofRequestBuilder = Box + Send + Sync>; - /// Result of the transaction import. #[derive(Clone, Copy, Debug)] pub enum TransactionImport { diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index 60d35dbdf1ae06e3dfeb33918185136b28e706af..b2517efb6607e1c78260d2c1dc69419445e0a79b 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -59,8 +59,6 @@ use libp2p::kad::handler::KademliaHandlerProto; use libp2p::kad::QueryId; use libp2p::kad::record::{self, store::{MemoryStore, RecordStore}}; #[cfg(not(target_os = "unknown"))] -use libp2p::swarm::toggle::Toggle; -#[cfg(not(target_os = "unknown"))] use libp2p::mdns::{Mdns, MdnsEvent}; use libp2p::multiaddr::Protocol; use log::{debug, info, trace, warn}; @@ -206,15 +204,9 @@ impl DiscoveryConfig { discovery_only_if_under_num, #[cfg(not(target_os = "unknown"))] mdns: if enable_mdns { - match Mdns::new() { - Ok(mdns) => Some(mdns).into(), - Err(err) => { - warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); - None.into() - } - } + MdnsWrapper::Instantiating(Mdns::new().boxed()) } else { - None.into() + MdnsWrapper::Disabled }, allow_non_globals_in_dht, known_external_addresses: LruHashSet::new( @@ -234,7 +226,7 @@ pub struct DiscoveryBehaviour { kademlias: HashMap>, /// Discovers nodes on the local network. #[cfg(not(target_os = "unknown"))] - mdns: Toggle, + mdns: MdnsWrapper, /// Stream that fires when we need to perform the next random Kademlia query. next_kad_random_query: Delay, /// After `next_kad_random_query` triggers, the next one triggers after this duration. 
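For downstream code, the `Params` change above amounts to deleting two fields wherever the network is constructed, mirroring the edit applied to `client/network/src/gossip/tests.rs` later in this diff:

// In every `sc_network::config::Params { .. }` literal, drop:
//     finality_proof_provider: ...,
//     finality_proof_request_builder: ...,
// All remaining fields stay exactly as they were; no replacement value is needed
// because sc-network no longer issues or answers finality proof requests.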
@@ -693,7 +685,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { DiscoveryOut::ValueNotFound(e.into_key(), stats.duration().unwrap_or_else(Default::default)) } Err(e) => { - warn!(target: "sub-libp2p", + debug!(target: "sub-libp2p", "Libp2p => Failed to get record: {:?}", e); DiscoveryOut::ValueNotFound(e.into_key(), stats.duration().unwrap_or_else(Default::default)) } @@ -704,7 +696,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { let ev = match res { Ok(ok) => DiscoveryOut::ValuePut(ok.key, stats.duration().unwrap_or_else(Default::default)), Err(e) => { - warn!(target: "sub-libp2p", + debug!(target: "sub-libp2p", "Libp2p => Failed to put record: {:?}", e); DiscoveryOut::ValuePutFailed(e.into_key(), stats.duration().unwrap_or_else(Default::default)) } @@ -716,7 +708,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { Ok(ok) => debug!(target: "sub-libp2p", "Libp2p => Record republished: {:?}", ok.key), - Err(e) => warn!(target: "sub-libp2p", + Err(e) => debug!(target: "sub-libp2p", "Libp2p => Republishing of record {:?} failed with: {:?}", e.key(), e) } @@ -736,8 +728,8 @@ impl NetworkBehaviour for DiscoveryBehaviour { handler, event: (pid.clone(), event) }), - NetworkBehaviourAction::ReportObservedAddr { address } => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + NetworkBehaviourAction::ReportObservedAddr { address, score } => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), } } } @@ -767,8 +759,8 @@ impl NetworkBehaviour for DiscoveryBehaviour { return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), NetworkBehaviourAction::NotifyHandler { event, .. } => match event {}, // `event` is an enum with no variant - NetworkBehaviourAction::ReportObservedAddr { address } => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + NetworkBehaviourAction::ReportObservedAddr { address, score } => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), } } @@ -785,6 +777,48 @@ fn protocol_name_from_protocol_id(id: &ProtocolId) -> Vec { v } +/// [`Mdns::new`] returns a future. Instead of forcing [`DiscoveryConfig::finish`] and all its +/// callers to be async, lazily instantiate [`Mdns`]. 
+#[cfg(not(target_os = "unknown"))] +enum MdnsWrapper { + Instantiating(futures::future::BoxFuture<'static, std::io::Result>), + Ready(Mdns), + Disabled, +} + +#[cfg(not(target_os = "unknown"))] +impl MdnsWrapper { + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + match self { + MdnsWrapper::Instantiating(_) => Vec::new(), + MdnsWrapper::Ready(mdns) => mdns.addresses_of_peer(peer_id), + MdnsWrapper::Disabled => Vec::new(), + } + } + + fn poll( + &mut self, + cx: &mut Context<'_>, + params: &mut impl PollParameters, + ) -> Poll> { + loop { + match self { + MdnsWrapper::Instantiating(fut) => { + *self = match futures::ready!(fut.as_mut().poll(cx)) { + Ok(mdns) => MdnsWrapper::Ready(mdns), + Err(err) => { + warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); + MdnsWrapper::Disabled + }, + } + } + MdnsWrapper::Ready(mdns) => return mdns.poll(cx, params), + MdnsWrapper::Disabled => return Poll::Pending, + } + } + } +} + #[cfg(test)] mod tests { use crate::config::ProtocolId; diff --git a/client/network/src/finality_requests.rs b/client/network/src/finality_requests.rs deleted file mode 100644 index 55f56b9a0cc25c582f122e00d5fa3c933b508f14..0000000000000000000000000000000000000000 --- a/client/network/src/finality_requests.rs +++ /dev/null @@ -1,403 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. -// -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! `NetworkBehaviour` implementation which handles incoming finality proof requests. -//! -//! Every request is coming in on a separate connection substream which gets -//! closed after we have sent the response back. Incoming requests are encoded -//! as protocol buffers (cf. `finality.v1.proto`). - -#![allow(unused)] - -use bytes::Bytes; -use codec::{Encode, Decode}; -use crate::{ - chain::FinalityProofProvider, - config::ProtocolId, - protocol::message, - schema, -}; -use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; -use libp2p::{ - core::{ - ConnectedPoint, - Multiaddr, - PeerId, - connection::ConnectionId, - upgrade::{InboundUpgrade, OutboundUpgrade, ReadOneError, UpgradeInfo, Negotiated}, - upgrade::{DeniedUpgrade, read_one, write_one} - }, - swarm::{ - NegotiatedSubstream, - NetworkBehaviour, - NetworkBehaviourAction, - NotifyHandler, - OneShotHandler, - OneShotHandlerConfig, - PollParameters, - SubstreamProtocol - } -}; -use prost::Message; -use sp_runtime::{generic::BlockId, traits::{Block, Header, One, Zero}}; -use std::{ - cmp::min, - collections::VecDeque, - io, - iter, - marker::PhantomData, - sync::Arc, - time::Duration, - task::{Context, Poll} -}; -use void::{Void, unreachable}; - -// Type alias for convenience. -pub type Error = Box; - -/// Event generated by the finality proof requests behaviour. -#[derive(Debug)] -pub enum Event { - /// A response to a finality proof request has arrived. - Response { - peer: PeerId, - /// Block hash originally passed to `send_request`. 
- block_hash: B::Hash, - /// Finality proof returned by the remote. - proof: Vec, - }, -} - -/// Configuration options for `FinalityProofRequests`. -#[derive(Debug, Clone)] -pub struct Config { - max_request_len: usize, - max_response_len: usize, - inactivity_timeout: Duration, - protocol: Bytes, -} - -impl Config { - /// Create a fresh configuration with the following options: - /// - /// - max. request size = 1 MiB - /// - max. response size = 1 MiB - /// - inactivity timeout = 15s - pub fn new(id: &ProtocolId) -> Self { - let mut c = Config { - max_request_len: 1024 * 1024, - max_response_len: 1024 * 1024, - inactivity_timeout: Duration::from_secs(15), - protocol: Bytes::new(), - }; - c.set_protocol(id); - c - } - - /// Limit the max. length of incoming finality proof request bytes. - pub fn set_max_request_len(&mut self, v: usize) -> &mut Self { - self.max_request_len = v; - self - } - - /// Limit the max. length of incoming finality proof response bytes. - pub fn set_max_response_len(&mut self, v: usize) -> &mut Self { - self.max_response_len = v; - self - } - - /// Limit the max. duration the substream may remain inactive before closing it. - pub fn set_inactivity_timeout(&mut self, v: Duration) -> &mut Self { - self.inactivity_timeout = v; - self - } - - /// Set protocol to use for upgrade negotiation. - pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { - let mut v = Vec::new(); - v.extend_from_slice(b"/"); - v.extend_from_slice(id.as_ref().as_bytes()); - v.extend_from_slice(b"/finality-proof/1"); - self.protocol = v.into(); - self - } -} - -/// The finality proof request handling behaviour. -pub struct FinalityProofRequests { - /// This behaviour's configuration. - config: Config, - /// How to construct finality proofs. - finality_proof_provider: Option>>, - /// Futures sending back the finality proof request responses. - outgoing: FuturesUnordered>, - /// Events to return as soon as possible from `poll`. - pending_events: VecDeque, Event>>, -} - -impl FinalityProofRequests -where - B: Block, -{ - /// Initializes the behaviour. - /// - /// If the proof provider is `None`, then the behaviour will not support the finality proof - /// requests protocol. - pub fn new(cfg: Config, finality_proof_provider: Option>>) -> Self { - FinalityProofRequests { - config: cfg, - finality_proof_provider, - outgoing: FuturesUnordered::new(), - pending_events: VecDeque::new(), - } - } - - /// Issue a new finality proof request. - /// - /// If the response doesn't arrive in time, or if the remote answers improperly, the target - /// will be disconnected. - pub fn send_request(&mut self, target: &PeerId, block_hash: B::Hash, request: Vec) { - let protobuf_rq = schema::v1::finality::FinalityProofRequest { - block_hash: block_hash.encode(), - request, - }; - - let mut buf = Vec::with_capacity(protobuf_rq.encoded_len()); - if let Err(err) = protobuf_rq.encode(&mut buf) { - log::warn!("failed to encode finality proof request {:?}: {:?}", protobuf_rq, err); - return; - } - - log::trace!("enqueueing finality proof request to {:?}: {:?}", target, protobuf_rq); - self.pending_events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: target.clone(), - handler: NotifyHandler::Any, - event: OutboundProtocol { - request: buf, - block_hash, - max_response_size: self.config.max_response_len, - protocol: self.config.protocol.clone(), - }, - }); - } - - /// Callback, invoked when a new finality request has been received from remote. 
- fn on_finality_request(&mut self, peer: &PeerId, request: &schema::v1::finality::FinalityProofRequest) - -> Result - { - let block_hash = Decode::decode(&mut request.block_hash.as_ref())?; - - log::trace!(target: "sync", "Finality proof request from {} for {}", peer, block_hash); - - // Note that an empty Vec is sent if no proof is available. - let finality_proof = if let Some(provider) = &self.finality_proof_provider { - provider - .prove_finality(block_hash, &request.request)? - .unwrap_or_default() - } else { - log::error!("Answering a finality proof request while finality provider is empty"); - return Err(From::from("Empty finality proof provider".to_string())) - }; - - Ok(schema::v1::finality::FinalityProofResponse { proof: finality_proof }) - } -} - -impl NetworkBehaviour for FinalityProofRequests -where - B: Block -{ - type ProtocolsHandler = OneShotHandler, OutboundProtocol, NodeEvent>; - type OutEvent = Event; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - let p = InboundProtocol { - max_request_len: self.config.max_request_len, - protocol: if self.finality_proof_provider.is_some() { - Some(self.config.protocol.clone()) - } else { - None - }, - marker: PhantomData, - }; - let mut cfg = OneShotHandlerConfig::default(); - cfg.keep_alive_timeout = self.config.inactivity_timeout; - OneShotHandler::new(SubstreamProtocol::new(p, ()), cfg) - } - - fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { - Vec::new() - } - - fn inject_connected(&mut self, _peer: &PeerId) { - } - - fn inject_disconnected(&mut self, _peer: &PeerId) { - } - - fn inject_event( - &mut self, - peer: PeerId, - connection: ConnectionId, - event: NodeEvent - ) { - match event { - NodeEvent::Request(request, mut stream) => { - match self.on_finality_request(&peer, &request) { - Ok(res) => { - log::trace!("enqueueing finality response for peer {}", peer); - let mut data = Vec::with_capacity(res.encoded_len()); - if let Err(e) = res.encode(&mut data) { - log::debug!("error encoding finality response for peer {}: {}", peer, e) - } else { - let future = async move { - if let Err(e) = write_one(&mut stream, data).await { - log::debug!("error writing finality response: {}", e) - } - }; - self.outgoing.push(future.boxed()) - } - } - Err(e) => log::debug!("error handling finality request from peer {}: {}", peer, e) - } - } - NodeEvent::Response(response, block_hash) => { - let ev = Event::Response { - peer, - block_hash, - proof: response.proof, - }; - self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(ev)); - } - } - } - - fn poll(&mut self, cx: &mut Context, _: &mut impl PollParameters) - -> Poll, Event>> - { - if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(ev); - } - - while let Poll::Ready(Some(_)) = self.outgoing.poll_next_unpin(cx) {} - Poll::Pending - } -} - -/// Output type of inbound and outbound substream upgrades. -#[derive(Debug)] -pub enum NodeEvent { - /// Incoming request from remote and substream to use for the response. - Request(schema::v1::finality::FinalityProofRequest, T), - /// Incoming response from remote. - Response(schema::v1::finality::FinalityProofResponse, B::Hash), -} - -/// Substream upgrade protocol. -/// -/// We attempt to parse an incoming protobuf encoded request (cf. `Request`) -/// which will be handled by the `FinalityProofRequests` behaviour, i.e. the request -/// will become visible via `inject_node_event` which then dispatches to the -/// relevant callback to process the message and prepare a response. 
-#[derive(Debug, Clone)] -pub struct InboundProtocol { - /// The max. request length in bytes. - max_request_len: usize, - /// The protocol to use during upgrade negotiation. If `None`, then the incoming protocol - /// is simply disabled. - protocol: Option, - /// Marker to pin the block type. - marker: PhantomData, -} - -impl UpgradeInfo for InboundProtocol { - type Info = Bytes; - // This iterator will return either 0 elements if `self.protocol` is `None`, or 1 element if - // it is `Some`. - type InfoIter = std::option::IntoIter; - - fn protocol_info(&self) -> Self::InfoIter { - self.protocol.clone().into_iter() - } -} - -impl InboundUpgrade for InboundProtocol -where - B: Block, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = NodeEvent; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future { - async move { - let len = self.max_request_len; - let vec = read_one(&mut s, len).await?; - match schema::v1::finality::FinalityProofRequest::decode(&vec[..]) { - Ok(r) => Ok(NodeEvent::Request(r, s)), - Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))) - } - }.boxed() - } -} - -/// Substream upgrade protocol. -/// -/// Sends a request to remote and awaits the response. -#[derive(Debug, Clone)] -pub struct OutboundProtocol { - /// The serialized protobuf request. - request: Vec, - /// Block hash that has been requested. - block_hash: B::Hash, - /// The max. response length in bytes. - max_response_size: usize, - /// The protocol to use for upgrade negotiation. - protocol: Bytes, -} - -impl UpgradeInfo for OutboundProtocol { - type Info = Bytes; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol.clone()) - } -} - -impl OutboundUpgrade for OutboundProtocol -where - B: Block, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = NodeEvent; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound(self, mut s: T, _: Self::Info) -> Self::Future { - async move { - write_one(&mut s, &self.request).await?; - let vec = read_one(&mut s, self.max_response_size).await?; - - schema::v1::finality::FinalityProofResponse::decode(&vec[..]) - .map(|r| NodeEvent::Response(r, self.block_hash)) - .map_err(|e| { - ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) - }) - }.boxed() - } -} diff --git a/client/network/src/gossip.rs b/client/network/src/gossip.rs index ac3f92e9d37aaf6cb7469ab7779da40304c2c160..8a46d0701e932ce35b29b67968b6510af9823732 100644 --- a/client/network/src/gossip.rs +++ b/client/network/src/gossip.rs @@ -41,7 +41,7 @@ //! In normal situations, messages sent through a [`QueuedSender`] will arrive in the same //! order as they have been sent. //! It is possible, in the situation of disconnects and reconnects, that messages arrive in a -//! different order. See also https://github.com/paritytech/substrate/issues/6756. +//! different order. See also . //! However, if multiple instances of [`QueuedSender`] exist for the same peer and protocol, or //! if some other code uses the [`NetworkService`] to send notifications to this combination or //! peer and protocol, then the notifications will be interleaved in an unpredictable way. 
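The `finality_requests.rs` behaviour deleted above was a hand-rolled one-shot request protocol. Request/response protocols in this crate instead go through the generic `request_responses` behaviour, configured via the `RequestResponseConfig` re-exported from `config` earlier in this diff. A rough sketch of such a registration; the field names are assumed to match `request_responses::ProtocolConfig`, and the limits simply mirror the defaults of the removed `Config`:

use std::{borrow::Cow, time::Duration};

let (tx, rx) = futures::channel::mpsc::channel(64);
let proto = sc_network::config::RequestResponseConfig {
    name: Cow::Borrowed("/example/finality-proof/1"), // illustrative protocol name
    max_request_size: 1024 * 1024,                    // 1 MiB, as in the removed Config
    max_response_size: 1024 * 1024,
    request_timeout: Duration::from_secs(15),
    inbound_queue: Some(tx),                          // `rx` is polled to answer requests
};
// `proto` would then be handed to the network alongside the other
// request/response protocols when `Params` is assembled.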
diff --git a/client/network/src/gossip/tests.rs b/client/network/src/gossip/tests.rs index e94052c0e4d29c30a17e7e87fec30bc30bd7acc9..93b69f7b64c8ec17d72b4dee99ef3dbf941d14c2 100644 --- a/client/network/src/gossip/tests.rs +++ b/client/network/src/gossip/tests.rs @@ -86,7 +86,6 @@ fn build_test_full_node(config: config::NetworkConfiguration) PassThroughVerifier(false), Box::new(client.clone()), None, - None, &sp_core::testing::TaskExecutor::new(), None, )); @@ -96,8 +95,6 @@ fn build_test_full_node(config: config::NetworkConfiguration) executor: None, network_config: config, chain: client.clone(), - finality_proof_provider: None, - finality_proof_request_builder: None, on_demand: None, transaction_pool: Arc::new(crate::config::EmptyTransactionPool), protocol_id: config::ProtocolId::from("/test-protocol-name"), diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 3fd01c33dcf5f0d0dc8dd0846bc9700483ab6fa5..fb65c754d79a297a567f866e8ea583ce31a0f2a1 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -141,8 +141,9 @@ //! block announces are pushed to other nodes. The handshake is empty on both sides. The message //! format is a SCALE-encoded tuple containing a block header followed with an opaque list of //! bytes containing some data associated with this block announcement, e.g. a candidate message. -//! - Notifications protocols that are registered using the `register_notifications_protocol` -//! method. For example: `/paritytech/grandpa/1`. See below for more information. +//! - Notifications protocols that are registered using +//! `NetworkConfiguration::notifications_protocols`. For example: `/paritytech/grandpa/1`. See +//! below for more information. //! //! ## The legacy Substrate substream //! @@ -249,7 +250,6 @@ mod block_requests; mod chain; mod peer_info; mod discovery; -mod finality_requests; mod light_client_handler; mod on_demand_layer; mod protocol; @@ -284,6 +284,9 @@ use sp_runtime::traits::{Block as BlockT, NumberFor}; /// two peers, the per-peer connection limit is not set to 1 but 2. const MAX_CONNECTIONS_PER_PEER: usize = 2; +/// The maximum number of concurrent established connections that were incoming. 
+const MAX_CONNECTIONS_ESTABLISHED_INCOMING: u32 = 10_000; + /// Minimum Requirements for a Hash within Networking pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index b72362fdfc362b086a4cab2747bd9e33352a76ce..007cdcbf7a603036273679a5b89a288e99bf6c22 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -44,6 +44,7 @@ use libp2p::{ upgrade::{OutboundUpgrade, read_one, write_one} }, swarm::{ + AddressRecord, NegotiatedSubstream, NetworkBehaviour, NetworkBehaviourAction, @@ -627,7 +628,7 @@ where let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); let child_info = match ChildType::from_prefixed_key(prefixed_key) { Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), - None => Err("Invalid child storage key".into()), + None => Err(sp_blockchain::Error::InvalidChildStorageKey), }; let proof = match child_info.and_then(|child_info| self.chain.read_child_proof( &BlockId::Hash(block), @@ -1463,7 +1464,7 @@ mod tests { impl PollParameters for EmptyPollParams { type SupportedProtocolsIter = iter::Empty>; type ListenedAddressesIter = iter::Empty; - type ExternalAddressesIter = iter::Empty; + type ExternalAddressesIter = iter::Empty; fn supported_protocols(&self) -> Self::SupportedProtocolsIter { iter::empty() diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs index 084172ee57c4f002c66c27e22bbe6dfc42f750fa..6e0add18adb02e0237a3df4841f689f219c6f769 100644 --- a/client/network/src/on_demand_layer.rs +++ b/client/network/src/on_demand_layer.rs @@ -51,6 +51,17 @@ pub struct OnDemand { requests_send: TracingUnboundedSender>, } + +#[derive(Debug, thiserror::Error)] +#[error("AlwaysBadChecker")] +struct ErrorAlwaysBadChecker; + +impl Into for ErrorAlwaysBadChecker { + fn into(self) -> ClientError { + ClientError::Application(Box::new(self)) + } +} + /// Dummy implementation of `FetchChecker` that always assumes that responses are bad. 
/// /// Considering that it is the responsibility of the client to build the fetcher, it can use this @@ -65,7 +76,7 @@ impl FetchChecker for AlwaysBadChecker { _remote_header: Option, _remote_proof: StorageProof, ) -> Result { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } fn check_read_proof( @@ -73,7 +84,7 @@ impl FetchChecker for AlwaysBadChecker { _request: &RemoteReadRequest, _remote_proof: StorageProof, ) -> Result,Option>>, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } fn check_read_child_proof( @@ -81,7 +92,7 @@ impl FetchChecker for AlwaysBadChecker { _request: &RemoteReadChildRequest, _remote_proof: StorageProof, ) -> Result, Option>>, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } fn check_execution_proof( @@ -89,7 +100,7 @@ impl FetchChecker for AlwaysBadChecker { _request: &RemoteCallRequest, _remote_proof: StorageProof, ) -> Result, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } fn check_changes_proof( @@ -97,7 +108,7 @@ impl FetchChecker for AlwaysBadChecker { _request: &RemoteChangesRequest, _remote_proof: ChangesProof ) -> Result, u32)>, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } fn check_body_proof( @@ -105,7 +116,7 @@ impl FetchChecker for AlwaysBadChecker { _request: &RemoteBodyRequest, _body: Vec ) -> Result, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } } diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs index e69ad2b17e59c7f856f1e1e4b73be88572519936..0bf2fe59fa21aeebedf81ba0f456ba4291c5f975 100644 --- a/client/network/src/peer_info.rs +++ b/client/network/src/peer_info.rs @@ -304,8 +304,8 @@ impl NetworkBehaviour for PeerInfoBehaviour { handler, event: EitherOutput::First(event) }), - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), } } @@ -334,8 +334,8 @@ impl NetworkBehaviour for PeerInfoBehaviour { handler, event: EitherOutput::Second(event) }), - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index d0b6b2823a2c8c7c11dfea28c4253562ab731389..41326b6d82a07b69cd3f7ec22ef6638a7ef1a22b 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -19,7 +19,7 @@ use crate::{ ExHashT, chain::Client, - config::{BoxFinalityProofRequestBuilder, ProtocolId, TransactionPool, TransactionImportFuture, TransactionImport}, + config::{ProtocolId, TransactionPool, TransactionImportFuture, TransactionImport}, error, utils::{interval, LruHashSet}, }; @@ -120,8 +120,6 @@ mod rep { pub const BAD_PROTOCOL: Rep = Rep::new_fatal("Unsupported protocol"); /// Peer role does not match (e.g. light peer connecting to another light peer). 
pub const BAD_ROLE: Rep = Rep::new_fatal("Unsupported role"); - /// Peer response data does not have requested bits. - pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); /// Peer send us a block announcement that failed at validation. pub const BAD_BLOCK_ANNOUNCEMENT: Rep = Rep::new(-(1 << 12), "Bad block announcement"); } @@ -131,7 +129,6 @@ struct Metrics { peers: Gauge, queued_blocks: Gauge, fork_targets: Gauge, - finality_proofs: GaugeVec, justifications: GaugeVec, propagated_transactions: Counter, } @@ -165,16 +162,6 @@ impl Metrics { )?; register(g, r)? }, - finality_proofs: { - let g = GaugeVec::new( - Opts::new( - "sync_extra_finality_proofs", - "Number of extra finality proof requests", - ), - &["status"], - )?; - register(g, r)? - }, propagated_transactions: register(Counter::new( "sync_propagated_transactions", "Number of transactions propagated to at least one peer", @@ -365,7 +352,6 @@ impl Protocol { local_peer_id: PeerId, chain: Arc>, transaction_pool: Arc>, - finality_proof_request_builder: Option>, protocol_id: ProtocolId, peerset_config: sc_peerset::PeersetConfig, block_announce_validator: Box + Send>, @@ -377,7 +363,6 @@ impl Protocol { config.roles, chain.clone(), &info, - finality_proof_request_builder, block_announce_validator, config.max_parallel_downloads, ); @@ -541,10 +526,9 @@ impl Protocol { self.sync.num_sync_requests() } - /// Sync local state with the blockchain state. - pub fn update_chain(&mut self) { - let info = self.context_data.chain.info(); - self.sync.update_chain_info(&info.best_hash, info.best_number); + /// Inform sync about new best imported block. + pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { + self.sync.update_chain_info(&hash, number); self.behaviour.set_legacy_handshake_message( build_status_message(&self.config, &self.context_data.chain), ); @@ -554,11 +538,6 @@ impl Protocol { ); } - /// Inform sync about an own imported block. - pub fn own_block_imported(&mut self, hash: B::Hash, number: NumberFor) { - self.sync.update_chain_info(&hash, number); - } - fn update_peer_info(&mut self, who: &PeerId) { if let Some(info) = self.sync.peer_info(who) { if let Some(ref mut peer) = self.context_data.peers.get_mut(who) { @@ -614,10 +593,7 @@ impl Protocol { warn!(target: "sub-libp2p", "Received unexpected RemoteHeaderResponse"), GenericMessage::RemoteChangesResponse(_) => warn!(target: "sub-libp2p", "Received unexpected RemoteChangesResponse"), - GenericMessage::FinalityProofResponse(_) => - warn!(target: "sub-libp2p", "Received unexpected FinalityProofResponse"), GenericMessage::BlockRequest(_) | - GenericMessage::FinalityProofRequest(_) | GenericMessage::RemoteReadChildRequest(_) | GenericMessage::RemoteCallRequest(_) | GenericMessage::RemoteReadRequest(_) | @@ -728,20 +704,6 @@ impl Protocol { } } } else { - // Validate fields against the request. 
- if request.fields.contains(message::BlockAttributes::HEADER) && response.blocks.iter().any(|b| b.header.is_none()) { - self.behaviour.disconnect_peer(&peer); - self.peerset_handle.report_peer(peer, rep::BAD_RESPONSE); - trace!(target: "sync", "Missing header for a block"); - return CustomMessageOutcome::None - } - if request.fields.contains(message::BlockAttributes::BODY) && response.blocks.iter().any(|b| b.body.is_none()) { - self.behaviour.disconnect_peer(&peer); - self.peerset_handle.report_peer(peer, rep::BAD_RESPONSE); - trace!(target: "sync", "Missing body for a block"); - return CustomMessageOutcome::None - } - match self.sync.on_block_data(&peer, Some(request), response) { Ok(sync::OnBlockData::Import(origin, blocks)) => CustomMessageOutcome::BlockImport(origin, blocks), @@ -1105,16 +1067,11 @@ impl Protocol { let is_best = self.context_data.chain.info().best_hash == hash; debug!(target: "sync", "Reannouncing block {:?} is_best: {}", hash, is_best); - self.send_announcement(&header, data, is_best, true) - } - - fn send_announcement(&mut self, header: &B::Header, data: Vec, is_best: bool, force: bool) { - let hash = header.hash(); for (who, ref mut peer) in self.context_data.peers.iter_mut() { - trace!(target: "sync", "Announcing block {:?} to {}", hash, who); let inserted = peer.known_blocks.insert(hash); - if inserted || force { + if inserted { + trace!(target: "sync", "Announcing block {:?} to {}", hash, who); let message = message::BlockAnnounce { header: header.clone(), state: if is_best { @@ -1191,7 +1148,11 @@ impl Protocol { self.update_peer_info(&who); (header, is_best, who) } - sync::PollBlockAnnounceValidation::Failure { who } => { + sync::PollBlockAnnounceValidation::Failure { who, disconnect } => { + if disconnect { + self.disconnect_peer(&who); + } + self.report_peer(who, rep::BAD_BLOCK_ANNOUNCEMENT); return CustomMessageOutcome::None } @@ -1274,18 +1235,6 @@ impl Protocol { count: usize, results: Vec<(Result>, BlockImportError>, B::Hash)> ) { - let new_best = results.iter().rev().find_map(|r| match r { - (Ok(BlockImportResult::ImportedUnknown(n, aux, _)), hash) if aux.is_new_best => Some((*n, hash.clone())), - _ => None, - }); - if let Some((best_num, best_hash)) = new_best { - self.sync.update_chain_info(&best_hash, best_num); - self.behaviour.set_legacy_handshake_message(build_status_message(&self.config, &self.context_data.chain)); - self.behaviour.set_notif_protocol_handshake( - &self.block_announces_protocol, - BlockAnnouncesHandshake::build(&self.config, &self.context_data.chain).encode() - ); - } let results = self.sync.on_blocks_processed( imported, count, @@ -1314,13 +1263,6 @@ impl Protocol { self.sync.on_justification_import(hash, number, success) } - /// Request a finality proof for the given block. - /// - /// Queues a new finality proof request and tries to dispatch all pending requests. - pub fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { - self.sync.request_finality_proof(&hash, number) - } - /// Notify the protocol that we have learned about the existence of nodes. /// /// Can be called multiple times with the same `PeerId`s. 
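The `update_chain`/`own_block_imported` pair above is replaced by a single `new_best_block_imported`, which updates the sync state and refreshes both the legacy and block-announce handshakes in one place. A sketch of what a caller now does when reacting to an import notification; the notification type and its field names are illustrative:

// Only new best blocks need to be reported; the hash and number come straight
// from the import notification instead of re-reading `chain.info()`.
if notification.is_new_best {
    protocol.new_best_block_imported(notification.hash, *notification.header.number());
}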
@@ -1328,34 +1270,6 @@ impl Protocol { self.behaviour.add_discovered_nodes(peer_ids) } - pub fn finality_proof_import_result( - &mut self, - request_block: (B::Hash, NumberFor), - finalization_result: Result<(B::Hash, NumberFor), ()>, - ) { - self.sync.on_finality_proof_import(request_block, finalization_result) - } - - /// Must be called after a [`CustomMessageOutcome::FinalityProofRequest`] has been emitted, - /// to notify of the response having arrived. - pub fn on_finality_proof_response( - &mut self, - who: PeerId, - response: message::FinalityProofResponse, - ) -> CustomMessageOutcome { - trace!(target: "sync", "Finality proof response from {} for {}", who, response.block); - match self.sync.on_block_finality_proof(who, response) { - Ok(sync::OnBlockFinalityProof::Nothing) => CustomMessageOutcome::None, - Ok(sync::OnBlockFinalityProof::Import { peer, hash, number, proof }) => - CustomMessageOutcome::FinalityProofImport(peer, hash, number, proof), - Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); - self.peerset_handle.report_peer(id, repu); - CustomMessageOutcome::None - } - } - } - fn format_stats(&self) -> String { let mut out = String::new(); for (id, stats) in &self.context_data.stats { @@ -1399,15 +1313,6 @@ impl Protocol { .set(m.justifications.failed_requests.into()); metrics.justifications.with_label_values(&["importing"]) .set(m.justifications.importing_requests.into()); - - metrics.finality_proofs.with_label_values(&["pending"]) - .set(m.finality_proofs.pending_requests.into()); - metrics.finality_proofs.with_label_values(&["active"]) - .set(m.finality_proofs.active_requests.into()); - metrics.finality_proofs.with_label_values(&["failed"]) - .set(m.finality_proofs.failed_requests.into()); - metrics.finality_proofs.with_label_values(&["importing"]) - .set(m.finality_proofs.importing_requests.into()); } } } @@ -1418,7 +1323,6 @@ impl Protocol { pub enum CustomMessageOutcome { BlockImport(BlockOrigin, Vec>), JustificationImport(Origin, B::Hash, NumberFor, Justification), - FinalityProofImport(Origin, B::Hash, NumberFor, Vec), /// Notification protocols have been opened with a remote. NotificationStreamOpened { remote: PeerId, @@ -1443,12 +1347,6 @@ pub enum CustomMessageOutcome { /// must be silently discarded. /// It is the responsibility of the handler to ensure that a timeout exists. BlockRequest { target: PeerId, request: message::BlockRequest }, - /// A new finality proof request must be emitted. - /// Once you have the response, you must call `Protocol::on_finality_proof_response`. - /// It is the responsibility of the handler to ensure that a timeout exists. - /// If the request times out, or the peer responds in an invalid way, the peer has to be - /// disconnect. This will inform the state machine that the request it has emitted is stale. - FinalityProofRequest { target: PeerId, block_hash: B::Hash, request: Vec }, /// Peer has a reported a new head of chain. 
PeerNewBest(PeerId, NumberFor), None, @@ -1545,14 +1443,6 @@ impl NetworkBehaviour for Protocol { }; self.pending_messages.push_back(event); } - for (id, r) in self.sync.finality_proof_requests() { - let event = CustomMessageOutcome::FinalityProofRequest { - target: id, - block_hash: r.block, - request: r.request, - }; - self.pending_messages.push_back(event); - } if let Poll::Ready(Some((tx_hash, result))) = self.pending_transactions.poll_next_unpin(cx) { if let Some(peers) = self.pending_transactions_peers.remove(&tx_hash) { peers.into_iter().for_each(|p| self.on_handle_transaction_import(p, result)); @@ -1582,8 +1472,8 @@ impl NetworkBehaviour for Protocol { return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }), - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), }; let outcome = match event { diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index f84aead47283a376afe1333eb06578d863f8796b..b8b4cce0a72c750363ef17a0d79899d63158bb8a 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -650,13 +650,17 @@ impl GenericProto { Some(sink) => sink }; + let message = message.into(); + trace!( target: "sub-libp2p", - "External API => Notification({:?}, {:?})", + "External API => Notification({:?}, {:?}, {} bytes)", target, protocol_name, + message.len(), ); - trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); + trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target); + notifs_sink.send_sync_notification( protocol_name, message @@ -1561,6 +1565,7 @@ impl NetworkBehaviour for GenericProto { connec_state, ConnectionState::OpeningThenClosing )); + *entry.into_mut() = PeerState::Disabled { connections, backoff_until }; } } else { error!( @@ -1571,34 +1576,20 @@ impl NetworkBehaviour for GenericProto { } } - // DisabledPendingEnable => DisabledPendingEnable | Incoming + // DisabledPendingEnable => Enabled | DisabledPendingEnable PeerState::DisabledPendingEnable { mut connections, timer, timer_deadline } => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { if let ConnectionState::Closed = *connec_state { - *connec_state = ConnectionState::OpenDesiredByRemote; - - let incoming_id = self.next_incoming_index; - self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { - Some(v) => v, - None => { - error!(target: "sub-libp2p", "Overflow in next_incoming_index"); - return - } - }; - - debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", - source, incoming_id); - self.peerset.incoming(source.clone(), incoming_id); - self.incoming.push(IncomingPeer { + debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open", + source, connection); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: source.clone(), - alive: true, - incoming_id, + handler: NotifyHandler::One(connection), + event: NotifsHandlerIn::Open, }); + *connec_state = ConnectionState::Opening; - *entry.into_mut() = PeerState::Incoming { 
- connections, - backoff_until: Some(timer_deadline), - }; + *entry.into_mut() = PeerState::Enabled { connections }; } else { // Connections in `OpeningThenClosing` are in a Closed phase, and @@ -1662,6 +1653,7 @@ impl NetworkBehaviour for GenericProto { }; if matches!(connections[pos].1, ConnectionState::Closing) { + *entry.into_mut() = PeerState::Enabled { connections }; return; } @@ -1693,7 +1685,7 @@ impl NetworkBehaviour for GenericProto { notifications_sink: replacement_sink, }; self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - *entry.into_mut() = PeerState::Enabled { connections, }; + *entry.into_mut() = PeerState::Enabled { connections }; } } else { @@ -1930,9 +1922,10 @@ impl NetworkBehaviour for GenericProto { if self.is_open(&source) { trace!( target: "sub-libp2p", - "Handler({:?}) => Notification({:?})", + "Handler({:?}) => Notification({:?}, {} bytes)", source, protocol_name, + message.len() ); trace!(target: "sub-libp2p", "External API <= Message({:?}, {:?})", protocol_name, source); let event = GenericProtoOut::Notification { @@ -1945,9 +1938,10 @@ impl NetworkBehaviour for GenericProto { } else { trace!( target: "sub-libp2p", - "Handler({:?}) => Post-close notification({:?})", + "Handler({:?}) => Post-close notification({:?}, {} bytes)", source, protocol_name, + message.len() ); } } diff --git a/client/network/src/protocol/generic_proto/handler.rs b/client/network/src/protocol/generic_proto/handler.rs index 0272261f67d57d5dc0fe64575df6883ad6c9deed..e479a34d14f3abd7cd66f9e04b940cfbe4610df2 100644 --- a/client/network/src/protocol/generic_proto/handler.rs +++ b/client/network/src/protocol/generic_proto/handler.rs @@ -138,6 +138,9 @@ pub struct NotifsHandler { /// Whether we are the connection dialer or listener. endpoint: ConnectedPoint, + /// Remote we are connected to. + peer_id: PeerId, + /// State of this handler. state: State, @@ -260,12 +263,13 @@ impl IntoProtocolsHandler for NotifsHandlerProto { SelectUpgrade::new(in_protocols, self.legacy_protocol.clone()) } - fn into_handler(self, _: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { + fn into_handler(self, peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { let num_out_proto = self.out_protocols.len(); NotifsHandler { in_protocols: self.in_protocols, out_protocols: self.out_protocols, + peer_id: peer_id.clone(), endpoint: connected_point.clone(), when_connection_open: Instant::now(), state: State::Closed { @@ -365,6 +369,8 @@ pub struct NotificationsSink { #[derive(Debug)] struct NotificationsSinkInner { + /// Target of the sink. + peer_id: PeerId, /// Sender to use in asynchronous contexts. Uses an asynchronous mutex. async_channel: FuturesMutex>, /// Sender to use in synchronous contexts. Uses a synchronous mutex. @@ -390,6 +396,11 @@ enum NotificationsSinkMessage { } impl NotificationsSink { + /// Returns the [`PeerId`] the sink is connected to. + pub fn peer_id(&self) -> &PeerId { + &self.inner.peer_id + } + /// Sends a notification to the peer. /// /// If too many messages are already buffered, the notification is silently discarded and the @@ -447,6 +458,12 @@ pub struct Ready<'a> { } impl<'a> Ready<'a> { + /// Returns the name of the protocol. Matches the one passed to + /// [`NotificationsSink::reserve_notification`]. + pub fn protocol_name(&self) -> &Cow<'static, str> { + &self.protocol_name + } + /// Consumes this slots reservation and actually queues the notification. /// /// Returns an error if the substream has been closed. 
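Since `NotificationsSink` now records the `PeerId` it belongs to (and `Ready` exposes its protocol name), callers can log or route on a sink without carrying the peer id separately. A small sketch using only the accessors added above; the wrapper function and log target are illustrative:

use std::borrow::Cow;

fn send_with_trace(sink: &NotificationsSink, protocol: Cow<'static, str>, payload: Vec<u8>) {
    log::trace!(target: "sub-libp2p", "{} bytes to {} on {}", payload.len(), sink.peer_id(), protocol);
    sink.send_sync_notification(protocol, payload);
}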
@@ -622,6 +639,7 @@ impl ProtocolsHandler for NotifsHandler { let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); let notifications_sink = NotificationsSink { inner: Arc::new(NotificationsSinkInner { + peer_id: self.peer_id.clone(), async_channel: FuturesMutex::new(async_tx), sync_channel: Mutex::new(sync_tx), }), @@ -666,6 +684,7 @@ impl ProtocolsHandler for NotifsHandler { _ => unreachable!() }; + debug_assert_eq!(pending_opening.len(), self.out_protocols.len()); for (n, is_pending) in pending_opening.iter().enumerate() { if *is_pending { continue; @@ -715,15 +734,16 @@ impl ProtocolsHandler for NotifsHandler { }, NotifsHandlerIn::Close => { - for mut substream in self.legacy_substreams.drain() { + for mut substream in self.legacy_substreams.drain(..) { substream.shutdown(); self.legacy_shutdown.push(substream); } match &mut self.state { State::Open { .. } => { + let pending_opening = self.out_protocols.iter().map(|_| false).collect(); self.state = State::Closed { - pending_opening: Vec::new(), + pending_opening, }; }, State::Opening { out_substreams, .. } => { @@ -782,6 +802,7 @@ impl ProtocolsHandler for NotifsHandler { let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); let notifications_sink = NotificationsSink { inner: Arc::new(NotificationsSinkInner { + peer_id: self.peer_id.clone(), async_channel: FuturesMutex::new(async_tx), sync_channel: Mutex::new(sync_tx), }), @@ -971,6 +992,16 @@ impl ProtocolsHandler for NotifsHandler { if let Some(pos) = self.out_protocols.iter().position(|(n, _)| *n == protocol_name) { if let Some(substream) = out_substreams[pos].as_mut() { let _ = substream.start_send_unpin(message); + // Calling `start_send_unpin` only queues the message. Actually + // emitting the message is done with `poll_flush`. In order to + // not introduce too much complexity, this flushing is done earlier + // in the body of this `poll()` method. As such, we schedule a task + // wake-up now in order to guarantee that `poll()` will be called + // again and the flush happening. + // At the time of the writing of this comment, a rewrite of this + // code is being planned. If you find this comment in the wild and + // the rewrite didn't happen, please consider a refactor. + cx.waker().wake_by_ref(); continue 'poll_notifs_sink; } diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index dae7b86db8771a9eb3ed697324e34aaeb788a678..4213d56bbf022dbc442778d7bd73d2671f56897c 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -25,7 +25,6 @@ pub use self::generic::{ BlockAnnounce, RemoteCallRequest, RemoteReadRequest, RemoteHeaderRequest, RemoteHeaderResponse, RemoteChangesRequest, RemoteChangesResponse, - FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteReadChildRequest, Roles, }; use sc_client_api::StorageProof; @@ -280,11 +279,10 @@ pub mod generic { RemoteChangesResponse(RemoteChangesResponse), /// Remote child storage read request. RemoteReadChildRequest(RemoteReadChildRequest), - /// Finality proof request. - FinalityProofRequest(FinalityProofRequest), - /// Finality proof response. - FinalityProofResponse(FinalityProofResponse), /// Batch of consensus protocol messages. + // NOTE: index is incremented by 2 due to finality proof related + // messages that were removed. 
+ #[codec(index = "17")] ConsensusBatch(Vec), } @@ -307,8 +305,6 @@ pub mod generic { Message::RemoteChangesRequest(_) => "RemoteChangesRequest", Message::RemoteChangesResponse(_) => "RemoteChangesResponse", Message::RemoteReadChildRequest(_) => "RemoteReadChildRequest", - Message::FinalityProofRequest(_) => "FinalityProofRequest", - Message::FinalityProofResponse(_) => "FinalityProofResponse", Message::ConsensusBatch(_) => "ConsensusBatch", } } @@ -546,26 +542,4 @@ pub mod generic { /// Missing changes tries roots proof. pub roots_proof: StorageProof, } - - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - /// Finality proof request. - pub struct FinalityProofRequest { - /// Unique request id. - pub id: RequestId, - /// Hash of the block to request proof for. - pub block: H, - /// Additional data blob (that both requester and provider understood) required for proving finality. - pub request: Vec, - } - - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - /// Finality proof response. - pub struct FinalityProofResponse { - /// Id of a request this response was made for. - pub id: RequestId, - /// Hash of the block (the same as in the FinalityProofRequest). - pub block: H, - /// Finality proof (if available). - pub proof: Option>, - } } diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 03714b05ace0d2f3d51da667c0d20faf67cf41b3..1ff8d37afeca97182f7c29e50d9df7bbd690c028 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -34,10 +34,8 @@ use sp_consensus::{BlockOrigin, BlockStatus, block_validation::{BlockAnnounceValidator, Validation}, import_queue::{IncomingBlock, BlockImportResult, BlockImportError} }; -use crate::{ - config::BoxFinalityProofRequestBuilder, - protocol::message::{self, generic::FinalityProofRequest, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse, - FinalityProofResponse, Roles}, +use crate::protocol::message::{ + self, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse, Roles, }; use either::Either; use extra_requests::ExtraRequests; @@ -46,12 +44,14 @@ use log::{debug, trace, warn, info, error}; use sp_runtime::{ Justification, generic::BlockId, - traits::{Block as BlockT, Header, NumberFor, Zero, One, CheckedSub, SaturatedConversion, Hash, HashFor} + traits::{ + Block as BlockT, Header as HeaderT, NumberFor, Zero, One, CheckedSub, SaturatedConversion, + Hash, HashFor, + }, }; use sp_arithmetic::traits::Saturating; use std::{ - fmt, ops::Range, collections::{HashMap, hash_map::Entry, HashSet, VecDeque}, - sync::Arc, pin::Pin, + fmt, ops::Range, collections::{HashMap, hash_map::Entry, HashSet}, sync::Arc, pin::Pin, }; use futures::{task::Poll, Future, stream::FuturesUnordered, FutureExt, StreamExt}; @@ -85,9 +85,6 @@ const MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS_PER_PEER: usize = 4; /// so far behind. const MAJOR_SYNC_BLOCKS: u8 = 5; -/// Number of recently announced blocks to track for each peer. -const ANNOUNCE_HISTORY_SIZE: usize = 64; - mod rep { use sc_peerset::ReputationChange as Rep; /// Reputation change when a peer sent us a message that led to a @@ -110,17 +107,17 @@ mod rep { /// Peer did not provide us with advertised block data. pub const NO_BLOCK: Rep = Rep::new(-(1 << 29), "No requested block data"); - /// Reputation change for peers which send us a known block. - pub const KNOWN_BLOCK: Rep = Rep::new(-(1 << 29), "Duplicate block"); + /// Reputation change for peers which send us non-requested block data. 
+ pub const NOT_REQUESTED: Rep = Rep::new(-(1 << 29), "Not requested block data"); /// Reputation change for peers which send us a block with bad justifications. pub const BAD_JUSTIFICATION: Rep = Rep::new(-(1 << 16), "Bad justification"); - /// Reputation change for peers which send us a block with bad finality proof. - pub const BAD_FINALITY_PROOF: Rep = Rep::new(-(1 << 16), "Bad finality proof"); - /// Reputation change when a peer sent us invlid ancestry result. pub const UNKNOWN_ANCESTOR:Rep = Rep::new(-(1 << 16), "DB Error"); + + /// Peer response data does not have requested bits. + pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); } enum PendingRequests { @@ -185,8 +182,6 @@ pub struct ChainSync { /// What block attributes we require for this node, usually derived from /// what role we are, but could be customized required_block_attributes: message::BlockAttributes, - /// Any extra finality proof requests. - extra_finality_proofs: ExtraRequests, /// Any extra justification requests. extra_justifications: ExtraRequests, /// A set of hashes of blocks that are being downloaded or have been @@ -195,8 +190,6 @@ pub struct ChainSync { /// The best block number that was successfully imported into the chain. /// This can not decrease. best_imported_number: NumberFor, - /// Finality proof handler. - request_builder: Option>, /// Fork sync targets. fork_targets: HashMap>, /// A set of peers for which there might be potential block requests @@ -228,9 +221,6 @@ pub struct PeerSync { /// The state of syncing this peer is in for us, generally categories /// into `Available` or "busy" with something as defined by `PeerSyncState`. pub state: PeerSyncState, - /// A queue of blocks that this peer has announced to us, should only - /// contain `ANNOUNCE_HISTORY_SIZE` entries. - pub recently_announced: VecDeque } /// The sync status of a peer we are trying to sync with @@ -270,8 +260,6 @@ pub enum PeerSyncState { DownloadingStale(B::Hash), /// Downloading justification for given block hash. DownloadingJustification(B::Hash), - /// Downloading finality proof for given block hash. - DownloadingFinalityProof(B::Hash) } impl PeerSyncState { @@ -336,6 +324,8 @@ pub enum PollBlockAnnounceValidation { Failure { /// Who sent the processed block announcement? who: PeerId, + /// Should the peer be disconnected? + disconnect: bool, }, /// The announcement does not require further handling. Nothing { @@ -366,6 +356,8 @@ enum PreValidateBlockAnnounce { Failure { /// Who sent the processed block announcement? who: PeerId, + /// Should the peer be disconnected? + disconnect: bool, }, /// The announcement does not require further handling. Nothing { @@ -402,20 +394,6 @@ pub enum OnBlockJustification { } } -/// Result of [`ChainSync::on_block_finality_proof`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum OnBlockFinalityProof { - /// The proof needs no further handling. - Nothing, - /// The proof should be imported. - Import { - peer: PeerId, - hash: B::Hash, - number: NumberFor, - proof: Vec - } -} - /// Result of [`ChainSync::has_slot_for_block_announce_validation`]. enum HasSlotForBlockAnnounceValidation { /// Yes, there is a slot for the block announce validation. 
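With the finality-proof machinery removed, the only new signal block-announce processing hands back to callers is the `disconnect` flag on `PollBlockAnnounceValidation::Failure`. A rough sketch of the expected call-site handling follows; `disconnect_peer`, `report_peer` and the reputation value are stand-ins chosen for illustration, and only `poll_block_announce_validation` and the `Failure { who, disconnect }` variant come from this diff:

    use std::task::{Context, Poll};
    use libp2p::PeerId;
    use sc_peerset::ReputationChange;
    use sp_runtime::traits::Block as BlockT;

    // Hypothetical glue between `ChainSync` and the surrounding protocol code.
    fn on_announce_validation<B: BlockT>(
        sync: &mut ChainSync<B>,
        cx: &mut Context<'_>,
        disconnect_peer: &mut dyn FnMut(&PeerId),
        report_peer: &mut dyn FnMut(PeerId, ReputationChange),
    ) {
        if let Poll::Ready(PollBlockAnnounceValidation::Failure { who, disconnect }) =
            sync.poll_block_announce_validation(cx)
        {
            // Only drop the connection when the validator explicitly asked for it.
            if disconnect {
                disconnect_peer(&who);
            }
            // A failed announcement is always worth a reputation penalty.
            report_peer(who, ReputationChange::new(-(1 << 12), "Bad block announcement"));
        }
    }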
@@ -432,7 +410,6 @@ impl ChainSync { role: Roles, client: Arc>, info: &BlockchainInfo, - request_builder: Option>, block_announce_validator: Box + Send>, max_parallel_downloads: u32, ) -> Self { @@ -449,12 +426,10 @@ impl ChainSync { best_queued_hash: info.best_hash, best_queued_number: info.best_number, best_imported_number: info.best_number, - extra_finality_proofs: ExtraRequests::new("finality proof"), extra_justifications: ExtraRequests::new("justification"), role, required_block_attributes, queue_blocks: Default::default(), - request_builder, fork_targets: Default::default(), pending_requests: Default::default(), block_announce_validator, @@ -542,7 +517,6 @@ impl ChainSync { best_hash, best_number, state: PeerSyncState::Available, - recently_announced: Default::default() }); return Ok(None) } @@ -555,7 +529,6 @@ impl ChainSync { best_hash, best_number, state: PeerSyncState::Available, - recently_announced: Default::default(), }); self.pending_requests.add(&who); return Ok(None) @@ -579,7 +552,6 @@ impl ChainSync { start: self.best_queued_number, state: AncestorSearchState::ExponentialBackoff(One::one()), }, - recently_announced: Default::default() }); Ok(Some(ancestry_request::(common_best))) @@ -591,7 +563,6 @@ impl ChainSync { best_hash, best_number, state: PeerSyncState::Available, - recently_announced: Default::default(), }); self.pending_requests.add(&who); Ok(None) @@ -613,14 +584,6 @@ impl ChainSync { }) } - /// Schedule a finality proof request for the given block. - pub fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { - let client = &self.client; - self.extra_finality_proofs.schedule((*hash, number), |base, block| { - is_descendent_of(&**client, base, block) - }) - } - /// Request syncing for the given block from given set of peers. // The implementation is similar to on_block_announce with unknown parent hash. pub fn set_sync_fork_request( @@ -700,30 +663,6 @@ impl ChainSync { }) } - /// Get an iterator over all scheduled finality proof requests. - pub fn finality_proof_requests(&mut self) -> impl Iterator)> + '_ { - let peers = &mut self.peers; - let request_builder = &mut self.request_builder; - let mut matcher = self.extra_finality_proofs.matcher(); - std::iter::from_fn(move || { - if let Some((peer, request)) = matcher.next(&peers) { - peers.get_mut(&peer) - .expect("`Matcher::next` guarantees the `PeerId` comes from the given peers; qed") - .state = PeerSyncState::DownloadingFinalityProof(request.0); - let req = message::generic::FinalityProofRequest { - id: 0, - block: request.0, - request: request_builder.as_mut() - .map(|builder| builder.build_request_data(&request.0)) - .unwrap_or_default() - }; - Some((peer, req)) - } else { - None - } - }) - } - /// Get an iterator over all block requests of all peers. 
pub fn block_requests(&mut self) -> impl Iterator)> + '_ { if self.pending_requests.is_empty() { @@ -811,13 +750,13 @@ impl ChainSync { blocks.reverse() } self.pending_requests.add(who); - if request.is_some() { + if let Some(request) = request { match &mut peer.state { PeerSyncState::DownloadingNew(start_block) => { self.blocks.clear_peer_download(who); let start_block = *start_block; peer.state = PeerSyncState::Available; - validate_blocks::(&blocks, who)?; + validate_blocks::(&blocks, who, Some(request))?; self.blocks.insert(start_block, blocks, who.clone()); self.blocks .drain(self.best_queued_number + One::one()) @@ -840,7 +779,7 @@ impl ChainSync { debug!(target: "sync", "Empty block response from {}", who); return Err(BadPeer(who.clone(), rep::NO_BLOCK)); } - validate_blocks::(&blocks, who)?; + validate_blocks::(&blocks, who, Some(request))?; blocks.into_iter().map(|b| { IncomingBlock { hash: b.hash, @@ -920,12 +859,11 @@ impl ChainSync { } | PeerSyncState::Available - | PeerSyncState::DownloadingJustification(..) - | PeerSyncState::DownloadingFinalityProof(..) => Vec::new() + | PeerSyncState::DownloadingJustification(..) => Vec::new() } } else { // When request.is_none() this is a block announcement. Just accept blocks. - validate_blocks::(&blocks, who)?; + validate_blocks::(&blocks, who, None)?; blocks.into_iter().map(|b| { IncomingBlock { hash: b.hash, @@ -939,40 +877,30 @@ impl ChainSync { }).collect() } } else { - Vec::new() + // We don't know of this peer, so we also did not request anything from it. + return Err(BadPeer(who.clone(), rep::NOT_REQUESTED)); }; - // When doing initial sync we don't request blocks in parallel. - // So the only way this can happen is when peers lie about the - // common block. - let is_recent = new_blocks.first() - .map(|block| { - self.peers.iter().any(|(_, peer)| peer.recently_announced.contains(&block.hash)) - }) - .unwrap_or(false); - - if !is_recent && new_blocks.last().map_or(false, |b| self.is_known(&b.hash)) { - // When doing initial sync we don't request blocks in parallel. - // So the only way this can happen is when peers lie about the - // common block. - debug!(target: "sync", "Ignoring known blocks from {}", who); - return Err(BadPeer(who.clone(), rep::KNOWN_BLOCK)); - } let orig_len = new_blocks.len(); new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); if new_blocks.len() != orig_len { debug!(target: "sync", "Ignoring {} blocks that are already queued", orig_len - new_blocks.len()); } - let origin = - if is_recent { - BlockOrigin::NetworkBroadcast - } else { - BlockOrigin::NetworkInitialSync - }; + let origin = if self.status().state != SyncState::Downloading { + BlockOrigin::NetworkBroadcast + } else { + BlockOrigin::NetworkInitialSync + }; if let Some((h, n)) = new_blocks.last().and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) { - trace!(target:"sync", "Accepted {} blocks ({:?}) with origin {:?}", new_blocks.len(), h, origin); + trace!( + target:"sync", + "Accepted {} blocks ({:?}) with origin {:?}", + new_blocks.len(), + h, + origin, + ); self.on_block_queued(h, n) } @@ -1033,41 +961,6 @@ impl ChainSync { Ok(OnBlockJustification::Nothing) } - /// Handle new finality proof data. 
- pub fn on_block_finality_proof - (&mut self, who: PeerId, resp: FinalityProofResponse) -> Result, BadPeer> - { - let peer = - if let Some(peer) = self.peers.get_mut(&who) { - peer - } else { - error!(target: "sync", "💔 Called on_block_finality_proof_data with a bad peer ID"); - return Ok(OnBlockFinalityProof::Nothing) - }; - - self.pending_requests.add(&who); - if let PeerSyncState::DownloadingFinalityProof(hash) = peer.state { - peer.state = PeerSyncState::Available; - - // We only request one finality proof at a time. - if hash != resp.block { - info!( - target: "sync", - "💔 Invalid block finality proof provided: requested: {:?} got: {:?}", - hash, - resp.block - ); - return Err(BadPeer(who, rep::BAD_FINALITY_PROOF)); - } - - if let Some((peer, hash, number, p)) = self.extra_finality_proofs.on_response(who, resp.proof) { - return Ok(OnBlockFinalityProof::Import { peer, hash, number, proof: p }) - } - } - - Ok(OnBlockFinalityProof::Nothing) - } - /// A batch of blocks have been processed, with or without errors. /// /// Call this when a batch of blocks have been processed by the import @@ -1122,11 +1015,6 @@ impl ChainSync { } } - if aux.needs_finality_proof { - trace!(target: "sync", "Block imported but requires finality proof {}: {:?}", number, hash); - self.request_finality_proof(&hash, number); - } - if number > self.best_imported_number { self.best_imported_number = number; } @@ -1178,22 +1066,8 @@ impl ChainSync { self.pending_requests.set_all(); } - pub fn on_finality_proof_import(&mut self, req: (B::Hash, NumberFor), res: Result<(B::Hash, NumberFor), ()>) { - self.extra_finality_proofs.try_finalize_root(req, res, true); - self.pending_requests.set_all(); - } - /// Notify about finalization of the given block. pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { - let client = &self.client; - let r = self.extra_finality_proofs.on_block_finalized(hash, number, |base, block| { - is_descendent_of(&**client, base, block) - }); - - if let Err(err) = r { - warn!(target: "sync", "💔 Error cleaning up pending extra finality proof data requests: {:?}", err) - } - let client = &self.client; let r = self.extra_justifications.on_block_finalized(hash, number, |base, block| { is_descendent_of(&**client, base, block) @@ -1345,14 +1219,14 @@ impl ChainSync { announce, who, }, - Ok(Validation::Failure) => { + Ok(Validation::Failure { disconnect }) => { debug!( target: "sync", "Block announcement validation of block {} from {} failed", hash, who, ); - PreValidateBlockAnnounce::Failure { who } + PreValidateBlockAnnounce::Failure { who, disconnect } } Err(e) => { error!(target: "sync", "💔 Block announcement validation errored: {}", e); @@ -1410,9 +1284,9 @@ impl ChainSync { self.peer_block_announce_validation_finished(&who); return PollBlockAnnounceValidation::Nothing { is_best, who, header: announce.header } }, - PreValidateBlockAnnounce::Failure { who } => { + PreValidateBlockAnnounce::Failure { who, disconnect } => { self.peer_block_announce_validation_finished(&who); - return PollBlockAnnounceValidation::Failure { who } + return PollBlockAnnounceValidation::Failure { who, disconnect } }, PreValidateBlockAnnounce::Process { announce, is_new_best, who } => { self.peer_block_announce_validation_finished(&who); @@ -1435,11 +1309,6 @@ impl ChainSync { return PollBlockAnnounceValidation::Nothing { is_best, who, header } }; - while peer.recently_announced.len() >= ANNOUNCE_HISTORY_SIZE { - peer.recently_announced.pop_front(); - } - peer.recently_announced.push_back(hash.clone()); - 
if is_best { // update their best block peer.best_number = number; @@ -1506,14 +1375,12 @@ impl ChainSync { self.blocks.clear_peer_download(who); self.peers.remove(who); self.extra_justifications.peer_disconnected(who); - self.extra_finality_proofs.peer_disconnected(who); self.pending_requests.set_all(); } /// Restart the sync process. This will reset all pending block requests and return an iterator /// of new block requests to make to peers. Peers that were downloading finality data (i.e. - /// their state was `DownloadingJustification` or `DownloadingFinalityProof`) are unaffected and - /// will stay in the same state. + /// their state was `DownloadingJustification`) are unaffected and will stay in the same state. fn restart<'a>( &'a mut self, ) -> impl Iterator), BadPeer>> + 'a { @@ -1526,11 +1393,10 @@ impl ChainSync { let old_peers = std::mem::take(&mut self.peers); old_peers.into_iter().filter_map(move |(id, p)| { - // peers that were downloading justifications or finality proofs + // peers that were downloading justifications // should be kept in that state. match p.state { - PeerSyncState::DownloadingJustification(_) - | PeerSyncState::DownloadingFinalityProof(_) => { + PeerSyncState::DownloadingJustification(_) => { self.peers.insert(id, p); return None; } @@ -1570,7 +1436,6 @@ impl ChainSync { Metrics { queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), - finality_proofs: self.extra_finality_proofs.metrics(), justifications: self.extra_justifications.metrics(), _priv: () } @@ -1581,7 +1446,6 @@ impl ChainSync { pub(crate) struct Metrics { pub(crate) queued_blocks: u32, pub(crate) fork_targets: u32, - pub(crate) finality_proofs: extra_requests::Metrics, pub(crate) justifications: extra_requests::Metrics, _priv: () } @@ -1677,7 +1541,7 @@ fn peer_block_request( trace!( target: "sync", "Requesting pre-finalized chain from {:?}, common={}, finalized={}, peer best={}, our best={}", - id, finalized, peer.common_number, peer.best_number, best_num, + id, peer.common_number, finalized, peer.best_number, best_num, ); } if let Some(range) = blocks.needed_blocks( @@ -1720,8 +1584,7 @@ fn fork_sync_request( finalized: NumberFor, attributes: &message::BlockAttributes, check_block: impl Fn(&B::Hash) -> BlockStatus, -) -> Option<(B::Hash, BlockRequest)> -{ +) -> Option<(B::Hash, BlockRequest)> { targets.retain(|hash, r| { if r.number <= finalized { trace!(target: "sync", "Removed expired fork sync request {:?} (#{})", hash, r.number); @@ -1774,7 +1637,75 @@ fn is_descendent_of(client: &T, base: &Block::Hash, block: &Block::Has Ok(ancestor.hash == *base) } -fn validate_blocks(blocks: &Vec>, who: &PeerId) -> Result<(), BadPeer> { +/// Validate that the given `blocks` are correct. +/// +/// It is expected that `blocks` are in ascending order. +fn validate_blocks( + blocks: &Vec>, + who: &PeerId, + request: Option>, +) -> Result<(), BadPeer> { + if let Some(request) = request { + if Some(blocks.len() as _) > request.max { + debug!( + target: "sync", + "Received more blocks than requested from {}. 
Expected in maximum {:?}, got {}.", + who, + request.max, + blocks.len(), + ); + + return Err(BadPeer(who.clone(), rep::NOT_REQUESTED)) + } + + let block_header = if request.direction == message::Direction::Descending { + blocks.last() + } else { + blocks.first() + }.and_then(|b| b.header.as_ref()); + + let expected_block = block_header.as_ref() + .map_or(false, |h| match request.from { + message::FromBlock::Hash(hash) => h.hash() == hash, + message::FromBlock::Number(n) => h.number() == &n, + }); + + if !expected_block { + debug!( + target: "sync", + "Received block that was not requested. Requested {:?}, got {:?}.", + request.from, + block_header, + ); + + return Err(BadPeer(who.clone(), rep::NOT_REQUESTED)) + } + + if request.fields.contains(message::BlockAttributes::HEADER) + && blocks.iter().any(|b| b.header.is_none()) + { + trace!( + target: "sync", + "Missing requested header for a block in response from {}.", + who, + ); + + return Err(BadPeer(who.clone(), rep::BAD_RESPONSE)) + } + + if request.fields.contains(message::BlockAttributes::BODY) + && blocks.iter().any(|b| b.body.is_none()) + { + trace!( + target: "sync", + "Missing requested body for a block in response from {}.", + who, + ); + + return Err(BadPeer(who.clone(), rep::BAD_RESPONSE)) + } + } + for b in blocks { if let Some(header) = &b.header { let hash = header.hash(); @@ -1805,20 +1736,23 @@ fn validate_blocks(blocks: &Vec>, who: } } } + Ok(()) } #[cfg(test)] mod test { - use super::message::FromBlock; + use super::message::{FromBlock, BlockState, BlockData}; use super::*; use sc_block_builder::BlockBuilderProvider; use sp_blockchain::HeaderBackend; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; use substrate_test_runtime_client::{ - runtime::{Block, Hash}, + runtime::{Block, Hash, Header}, ClientBlockImportExt, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, + BlockBuilderExt, }; + use futures::{future::poll_fn, executor::block_on}; #[test] fn processes_empty_response_on_justification_request_for_unknown_block() { @@ -1835,7 +1769,6 @@ mod test { Roles::AUTHORITY, client.clone(), &info, - None, block_announce_validator, 1, ); @@ -1907,7 +1840,6 @@ mod test { Roles::AUTHORITY, client.clone(), &info, - None, Box::new(DefaultBlockAnnounceValidator), 1, ); @@ -1915,7 +1847,6 @@ mod test { let peer_id1 = PeerId::random(); let peer_id2 = PeerId::random(); let peer_id3 = PeerId::random(); - let peer_id4 = PeerId::random(); let mut new_blocks = |n| { for _ in 0..n { @@ -1928,7 +1859,6 @@ mod test { }; let (b1_hash, b1_number) = new_blocks(50); - let (b2_hash, b2_number) = new_blocks(10); // add 2 peers at blocks that we don't have locally sync.new_peer(peer_id1.clone(), Hash::random(), 42).unwrap(); @@ -1958,38 +1888,189 @@ mod test { PeerSyncState::DownloadingJustification(b1_hash), ); - // add another peer at a known later block - sync.new_peer(peer_id4.clone(), b2_hash, b2_number).unwrap(); - - // we request a finality proof for a block we have locally - sync.request_finality_proof(&b2_hash, b2_number); - - // the finality proof request should be scheduled to peer 4 - // which is at that block - assert!( - sync.finality_proof_requests().any(|(p, r)| { p == peer_id4 && r.block == b2_hash }) - ); - - assert_eq!( - sync.peers.get(&peer_id4).unwrap().state, - PeerSyncState::DownloadingFinalityProof(b2_hash), - ); - // we restart the sync state let block_requests = sync.restart(); // which should make us send out block requests to the first two peers assert!(block_requests.map(|r| 
r.unwrap()).all(|(p, _)| { p == peer_id1 || p == peer_id2 })); - // peer 3 and 4 should be unaffected as they were downloading finality data + // peer 3 should be unaffected as it was downloading finality data assert_eq!( sync.peers.get(&peer_id3).unwrap().state, PeerSyncState::DownloadingJustification(b1_hash), ); + } - assert_eq!( - sync.peers.get(&peer_id4).unwrap().state, - PeerSyncState::DownloadingFinalityProof(b2_hash), + /// Send a block announcement for the given `header`. + fn send_block_announce( + header: Header, + peer_id: &PeerId, + sync: &mut ChainSync, + ) { + let block_announce = BlockAnnounce { + header: header.clone(), + state: Some(BlockState::Best), + data: Some(Vec::new()), + }; + + sync.push_block_announce_validation( + peer_id.clone(), + header.hash(), + block_announce, + true, ); + + // Poll until we have processed the block announcement + block_on(poll_fn(|cx| loop { + if sync.poll_block_announce_validation(cx).is_pending() { + break Poll::Ready(()) + } + })) + } + + /// Create a block response from the given `blocks`. + fn create_block_response(blocks: Vec) -> BlockResponse { + BlockResponse:: { + id: 0, + blocks: blocks.into_iter().map(|b| + BlockData:: { + hash: b.hash(), + header: Some(b.header().clone()), + body: Some(b.deconstruct().1), + receipt: None, + message_queue: None, + justification: None, + } + ).collect(), + } + } + + /// Get a block request from `sync` and check that it matches the expected request. + fn get_block_request( + sync: &mut ChainSync, + from: message::FromBlock, + max: u32, + peer: &PeerId, + ) -> BlockRequest { + let requests = sync.block_requests().collect::>(); + assert_eq!(1, requests.len()); + assert_eq!(peer, requests[0].0); + + let request = requests[0].1.clone(); + + assert_eq!(from, request.from); + assert_eq!(Some(max), request.max); + request + } + + /// This test is a regression test as observed on a real network. + /// + /// The node is connected to multiple peers. Both of these peers have a best block (1) that + /// is below our best block (3). Now peer 2 announces a fork of block 3 that we will + /// request from peer 2. After importing the fork, peer 2 and then peer 1 will announce block 4. + /// But as peer 1 in our view is still at block 1, we will request block 2 (which we already have) + /// from it. In the meantime peer 2 sends us blocks 4 and 3 and we send another request for block + /// 2 to peer 2. Peer 1 answers with block 2 and then peer 2 does as well. This needs to succeed, as we + /// have requested block 2 from both peers. 
+ #[test] + fn do_not_report_peer_on_block_response_for_block_request() { + sp_tracing::try_init_simple(); + + let mut client = Arc::new(TestClientBuilder::new().build()); + let info = client.info(); + + let mut sync = ChainSync::new( + Roles::AUTHORITY, + client.clone(), + &info, + Box::new(DefaultBlockAnnounceValidator), + 5, + ); + + let peer_id1 = PeerId::random(); + let peer_id2 = PeerId::random(); + + let mut client2 = client.clone(); + let mut build_block = || { + let block = client2.new_block(Default::default()).unwrap().build().unwrap().block; + client2.import(BlockOrigin::Own, block.clone()).unwrap(); + + block + }; + + let mut client2 = client.clone(); + let mut build_block_at = |at, import| { + let mut block_builder = client2.new_block_at(&BlockId::Hash(at), Default::default(), false) + .unwrap(); + // Make sure we generate a different block as fork + block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); + + let block = block_builder.build().unwrap().block; + + if import { + client2.import(BlockOrigin::Own, block.clone()).unwrap(); + } + + block + }; + + let block1 = build_block(); + let block2 = build_block(); + let block3 = build_block(); + let block3_fork = build_block_at(block2.hash(), false); + + // Add two peers which are on block 1. + sync.new_peer(peer_id1.clone(), block1.hash(), 1).unwrap(); + sync.new_peer(peer_id2.clone(), block1.hash(), 1).unwrap(); + + // Tell sync that our best block is 3. + sync.update_chain_info(&block3.hash(), 3); + + // There should be no requests. + assert!(sync.block_requests().collect::>().is_empty()); + + // Let peer2 announce a fork of block 3 + send_block_announce(block3_fork.header().clone(), &peer_id2, &mut sync); + + // Import and tell sync that we now have the fork. + client.import(BlockOrigin::Own, block3_fork.clone()).unwrap(); + sync.update_chain_info(&block3_fork.hash(), 3); + + let block4 = build_block_at(block3_fork.hash(), false); + + // Let peer2 announce block 4 and check that sync wants to get the block. + send_block_announce(block4.header().clone(), &peer_id2, &mut sync); + + let request = get_block_request(&mut sync, FromBlock::Hash(block4.hash()), 2, &peer_id2); + + // Peer1 announces the same block, but as the common block is still `1`, sync will request + // block 2 again. + send_block_announce(block4.header().clone(), &peer_id1, &mut sync); + + let request2 = get_block_request(&mut sync, FromBlock::Number(2), 1, &peer_id1); + + let response = create_block_response(vec![block4.clone(), block3_fork.clone()]); + let res = sync.on_block_data(&peer_id2, Some(request), response).unwrap(); + + // We should not yet import the blocks, because there is still an open request for fetching + // block `2` which blocks the import. 
+ assert!(matches!(res, OnBlockData::Import(_, blocks) if blocks.is_empty())); + + let request3 = get_block_request(&mut sync, FromBlock::Number(2), 1, &peer_id2); + + let response = create_block_response(vec![block2.clone()]); + let res = sync.on_block_data(&peer_id1, Some(request2), response).unwrap(); + assert!( + matches!( + res, + OnBlockData::Import(_, blocks) + if blocks.iter().all(|b| [2, 3, 4].contains(b.header.as_ref().unwrap().number())) + ) + ); + + let response = create_block_response(vec![block2.clone()]); + let res = sync.on_block_data(&peer_id2, Some(request3), response).unwrap(); + // Nothing to import + assert!(matches!(res, OnBlockData::Import(_, blocks) if blocks.is_empty())); } } diff --git a/client/network/src/protocol/sync/extra_requests.rs b/client/network/src/protocol/sync/extra_requests.rs index df336c25339fd8eb42b635735e7fab94fcd2feb6..7a7198aa7a0b60fa2217f45a6a9be144d23571df 100644 --- a/client/network/src/protocol/sync/extra_requests.rs +++ b/client/network/src/protocol/sync/extra_requests.rs @@ -528,13 +528,12 @@ mod tests { impl Arbitrary for ArbitraryPeerSyncState { fn arbitrary(g: &mut G) -> Self { - let s = match g.gen::() % 5 { + let s = match g.gen::() % 4 { 0 => PeerSyncState::Available, // TODO: 1 => PeerSyncState::AncestorSearch(g.gen(), AncestorSearchState), 1 => PeerSyncState::DownloadingNew(g.gen::()), 2 => PeerSyncState::DownloadingStale(Hash::random()), - 3 => PeerSyncState::DownloadingJustification(Hash::random()), - _ => PeerSyncState::DownloadingFinalityProof(Hash::random()) + _ => PeerSyncState::DownloadingJustification(Hash::random()), }; ArbitraryPeerSyncState(s) } @@ -550,7 +549,6 @@ mod tests { best_hash: Hash::random(), best_number: g.gen(), state: ArbitraryPeerSyncState::arbitrary(g).0, - recently_announced: Default::default() }; ArbitraryPeerSync(ps) } diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index a3a68f719d6bb535e539737a2d4495f2794c7d6d..a410ae0dff559262343312616137cb1df4f2db9c 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -48,9 +48,10 @@ use libp2p::{ PollParameters, ProtocolsHandler, }, }; +use lru::LruCache; use std::{ borrow::Cow, collections::{hash_map::Entry, HashMap}, convert::TryFrom as _, io, iter, - pin::Pin, task::{Context, Poll}, time::Duration, + pin::Pin, task::{Context, Poll}, time::{Duration, Instant}, }; pub use libp2p::request_response::{InboundFailure, OutboundFailure, RequestId}; @@ -128,7 +129,10 @@ pub enum Event { protocol: Cow<'static, str>, /// If `Ok`, contains the time elapsed between when we received the request and when we /// sent back the response. If `Err`, the error that happened. - result: Result, + /// + /// Note: Given that response time is tracked on a best-effort basis only, `Ok(time)` can be + /// `None`. + result: Result, ResponseFailure>, }, /// A request initiated using [`RequestResponsesBehaviour::send_request`] has succeeded or @@ -154,21 +158,19 @@ pub struct RequestResponsesBehaviour { /// Whenever an incoming request arrives, a `Future` is added to this list and will yield the /// response to send back to the remote. pending_responses: stream::FuturesUnordered< - Pin + Send>> + Pin> + Send>> >, + + /// Whenever an incoming request arrives, the arrival [`Instant`] is recorded here. + pending_responses_arrival_time: LruCache, } /// Generated by the response builder and waiting to be processed. 
-enum RequestProcessingOutcome { - Response { - protocol: Cow<'static, str>, - inner_channel: ResponseChannel, ()>>, - response: Vec, - }, - Busy { - peer: PeerId, - protocol: Cow<'static, str>, - }, +struct RequestProcessingOutcome { + request_id: RequestId, + protocol: Cow<'static, str>, + inner_channel: ResponseChannel, ()>>, + response: Vec, } impl RequestResponsesBehaviour { @@ -201,7 +203,8 @@ impl RequestResponsesBehaviour { Ok(Self { protocols, - pending_responses: stream::FuturesUnordered::new(), + pending_responses: Default::default(), + pending_responses_arrival_time: LruCache::new(1_000), }) } @@ -347,22 +350,31 @@ impl NetworkBehaviour for RequestResponsesBehaviour { > { 'poll_all: loop { // Poll to see if any response is ready to be sent back. - while let Poll::Ready(Some(result)) = self.pending_responses.poll_next_unpin(cx) { - match result { - RequestProcessingOutcome::Response { - protocol, inner_channel, response - } => { - if let Some((protocol, _)) = self.protocols.get_mut(&*protocol) { - protocol.send_response(inner_channel, Ok(response)); - } - } - RequestProcessingOutcome::Busy { peer, protocol } => { - let out = Event::InboundRequest { - peer, - protocol, - result: Err(ResponseFailure::Busy), - }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); + while let Poll::Ready(Some(outcome)) = self.pending_responses.poll_next_unpin(cx) { + let RequestProcessingOutcome { + request_id, + protocol: protocol_name, + inner_channel, + response + } = match outcome { + Some(outcome) => outcome, + // The response builder was too busy and thus the request was dropped. This is + // later on reported as a `InboundFailure::Omission`. + None => continue, + }; + + if let Some((protocol, _)) = self.protocols.get_mut(&*protocol_name) { + if let Err(_) = protocol.send_response(inner_channel, Ok(response)) { + // Note: In case this happened due to a timeout, the corresponding + // `RequestResponse` behaviour will emit an `InboundFailure::Timeout` event. + self.pending_responses_arrival_time.pop(&request_id); + log::debug!( + target: "sub-libp2p", + "Failed to send response for {:?} on protocol {:?} due to a \ + timeout or due to the connection to the peer being closed. \ + Dropping response", + request_id, protocol_name, + ); } } } @@ -398,9 +410,9 @@ impl NetworkBehaviour for RequestResponsesBehaviour { event: ((*protocol).to_string(), event), }) } - NetworkBehaviourAction::ReportObservedAddr { address } => { + NetworkBehaviourAction::ReportObservedAddr { address, score } => { return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { - address, + address, score, }) } }; @@ -409,15 +421,21 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // Received a request from a remote. RequestResponseEvent::Message { peer, - message: RequestResponseMessage::Request { request, channel, .. }, + message: RequestResponseMessage::Request { request_id, request, channel, .. }, } => { + self.pending_responses_arrival_time.put( + request_id.clone(), + Instant::now(), + ); + let (tx, rx) = oneshot::channel(); // Submit the request to the "response builder" passed by the user at // initialization. if let Some(resp_builder) = resp_builder { - // If the response builder is too busy, silently drop `tx`. - // This will be reported as a `Busy` error. + // If the response builder is too busy, silently drop `tx`. This + // will be reported by the corresponding `RequestResponse` through + // an `InboundFailure::Omission` event. 
let _ = resp_builder.try_send(IncomingRequest { peer: peer.clone(), payload: request, @@ -428,13 +446,14 @@ impl NetworkBehaviour for RequestResponsesBehaviour { let protocol = protocol.clone(); self.pending_responses.push(Box::pin(async move { // The `tx` created above can be dropped if we are not capable of - // processing this request, which is reflected as a "Busy" error. + // processing this request, which is reflected as a + // `InboundFailure::Omission` event. if let Ok(response) = rx.await { - RequestProcessingOutcome::Response { - protocol, inner_channel: channel, response - } + Some(RequestProcessingOutcome { + request_id, protocol, inner_channel: channel, response + }) } else { - RequestProcessingOutcome::Busy { peer, protocol } + None } })); @@ -445,11 +464,10 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // Received a response from a remote to one of our requests. RequestResponseEvent::Message { - message: - RequestResponseMessage::Response { - request_id, - response, - }, + message: RequestResponseMessage::Response { + request_id, + response, + }, .. } => { let out = Event::RequestFinished { @@ -472,8 +490,10 @@ impl NetworkBehaviour for RequestResponsesBehaviour { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); } - // Remote has tried to send a request but failed. - RequestResponseEvent::InboundFailure { peer, error, .. } => { + // An inbound request failed, either while reading the request or due to failing + // to send a response. + RequestResponseEvent::InboundFailure { request_id, peer, error, .. } => { + self.pending_responses_arrival_time.pop(&request_id); let out = Event::InboundRequest { peer, protocol: protocol.clone(), @@ -481,6 +501,24 @@ impl NetworkBehaviour for RequestResponsesBehaviour { }; return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); } + RequestResponseEvent::ResponseSent { request_id, peer } => { + let arrival_time = self.pending_responses_arrival_time.pop(&request_id) + .map(|t| t.elapsed()); + if arrival_time.is_none() { + log::debug!( + "Expected to find arrival time for sent response. Is the LRU \ + cache size set too small?", + ); + } + + let out = Event::InboundRequest { + peer, + protocol: protocol.clone(), + result: Ok(arrival_time), + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); + + } }; } } @@ -520,8 +558,6 @@ pub enum RequestFailure { /// Error when processing a request sent by a remote. #[derive(Debug, derive_more::Display, derive_more::Error)] pub enum ResponseFailure { - /// Internal response builder is too busy to process this request. - Busy, /// Problem on the network. #[display(fmt = "Problem on the network")] Network(#[error(ignore)] InboundFailure), @@ -655,7 +691,10 @@ impl RequestResponseCodec for GenericCodec { #[cfg(test)] mod tests { - use futures::{channel::mpsc, prelude::*}; + use futures::channel::mpsc; + use futures::executor::LocalPool; + use futures::prelude::*; + use futures::task::Spawn; use libp2p::identity::Keypair; use libp2p::Multiaddr; use libp2p::core::upgrade; @@ -666,7 +705,8 @@ mod tests { #[test] fn basic_request_response_works() { - let protocol_name = "/test/req-rep/1"; + let protocol_name = "/test/req-resp/1"; + let mut pool = LocalPool::new(); // Build swarms whose behaviour is `RequestResponsesBehaviour`. 
let mut swarms = (0..2) @@ -694,12 +734,12 @@ mod tests { inbound_queue: Some(tx), })).unwrap(); - async_std::task::spawn(async move { + pool.spawner().spawn_obj(async move { while let Some(rq) = rx.next().await { assert_eq!(rq.payload, b"this is a request"); let _ = rq.pending_response.send(b"this is a response".to_vec()); } - }); + }.boxed().into()).unwrap(); b }; @@ -719,26 +759,24 @@ mod tests { Swarm::dial_addr(&mut swarms[0].0, dial_addr).unwrap(); } - // Running `swarm[0]` in the background until a `InboundRequest` event happens, - // which is a hint about the test having ended. - async_std::task::spawn({ + // Running `swarm[0]` in the background. + pool.spawner().spawn_obj({ let (mut swarm, _) = swarms.remove(0); async move { loop { match swarm.next_event().await { SwarmEvent::Behaviour(super::Event::InboundRequest { result, .. }) => { - assert!(result.is_ok()); - break + result.unwrap(); }, _ => {} } } - } - }); + }.boxed().into() + }).unwrap(); // Remove and run the remaining swarm. let (mut swarm, _) = swarms.remove(0); - async_std::task::block_on(async move { + pool.run_until(async move { let mut sent_request_id = None; loop { @@ -769,7 +807,8 @@ mod tests { #[test] fn max_response_size_exceeded() { - let protocol_name = "/test/req-rep/1"; + let protocol_name = "/test/req-resp/1"; + let mut pool = LocalPool::new(); // Build swarms whose behaviour is `RequestResponsesBehaviour`. let mut swarms = (0..2) @@ -797,12 +836,12 @@ mod tests { inbound_queue: Some(tx), })).unwrap(); - async_std::task::spawn(async move { + pool.spawner().spawn_obj(async move { while let Some(rq) = rx.next().await { assert_eq!(rq.payload, b"this is a request"); let _ = rq.pending_response.send(b"this response exceeds the limit".to_vec()); } - }); + }.boxed().into()).unwrap(); b }; @@ -824,7 +863,7 @@ mod tests { // Running `swarm[0]` in the background until a `InboundRequest` event happens, // which is a hint about the test having ended. - async_std::task::spawn({ + pool.spawner().spawn_obj({ let (mut swarm, _) = swarms.remove(0); async move { loop { @@ -836,12 +875,12 @@ mod tests { _ => {} } } - } - }); + }.boxed().into() + }).unwrap(); // Remove and run the remaining swarm. let (mut swarm, _) = swarms.remove(0); - async_std::task::block_on(async move { + pool.run_until(async move { let mut sent_request_id = None; loop { diff --git a/client/network/src/schema.rs b/client/network/src/schema.rs index 44fbbffd25406d5b985707f46f8c2e9123ca992a..423d3ef5b41e4d8124138d8cdd0f8c7cf062f90d 100644 --- a/client/network/src/schema.rs +++ b/client/network/src/schema.rs @@ -20,9 +20,6 @@ pub mod v1 { include!(concat!(env!("OUT_DIR"), "/api.v1.rs")); - pub mod finality { - include!(concat!(env!("OUT_DIR"), "/api.v1.finality.rs")); - } pub mod light { include!(concat!(env!("OUT_DIR"), "/api.v1.light.rs")); } diff --git a/client/network/src/schema/finality.v1.proto b/client/network/src/schema/finality.v1.proto deleted file mode 100644 index 843bc4eca0990cc01b1479e19d68a721395266c4..0000000000000000000000000000000000000000 --- a/client/network/src/schema/finality.v1.proto +++ /dev/null @@ -1,19 +0,0 @@ -// Schema definition for finality proof request/responses. - -syntax = "proto3"; - -package api.v1.finality; - -// Request a finality proof from a peer. -message FinalityProofRequest { - // SCALE-encoded hash of the block to request. - bytes block_hash = 1; - // Opaque chain-specific additional request data. - bytes request = 2; -} - -// Response to a finality proof request. 
-message FinalityProofResponse { - // Opaque chain-specific finality proof. Empty if no such proof exists. - bytes proof = 1; // optional -} diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 3296a97d71bbc10d6080332ae58f1aaf2f3b32f9..3a368088e5392b97ad4bbf9b62966672c09d60fc 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -38,16 +38,41 @@ use crate::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, on_demand_layer::AlwaysBadChecker, - light_client_handler, block_requests, finality_requests, - protocol::{self, event::Event, NotifsHandlerError, NotificationsSink, Ready, sync::SyncState, PeerInfo, Protocol}, + light_client_handler, block_requests, + protocol::{ + self, + NotifsHandlerError, + NotificationsSink, + PeerInfo, + Protocol, + Ready, + event::Event, + sync::SyncState, + }, transport, ReputationChange, }; use futures::{channel::oneshot, prelude::*}; use libp2p::{PeerId, multiaddr, Multiaddr}; -use libp2p::core::{ConnectedPoint, Executor, connection::{ConnectionError, PendingConnectionError}, either::EitherError}; +use libp2p::core::{ + ConnectedPoint, + Executor, + connection::{ + ConnectionLimits, + ConnectionError, + PendingConnectionError + }, + either::EitherError, + upgrade +}; use libp2p::kad::record; use libp2p::ping::handler::PingFailure; -use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent, protocols_handler::NodeHandlerWrapperError}; +use libp2p::swarm::{ + AddressScore, + NetworkBehaviour, + SwarmBuilder, + SwarmEvent, + protocols_handler::NodeHandlerWrapperError +}; use log::{error, info, trace, warn}; use metrics::{Metrics, MetricSources, Histogram, HistogramVec}; use parking_lot::Mutex; @@ -248,7 +273,6 @@ impl NetworkWorker { local_peer_id.clone(), params.chain.clone(), params.transaction_pool, - params.finality_proof_request_builder, params.protocol_id.clone(), peerset_config, params.block_announce_validator, @@ -267,10 +291,6 @@ impl NetworkWorker { let config = block_requests::Config::new(¶ms.protocol_id); block_requests::BlockRequests::new(config, params.chain.clone()) }; - let finality_proof_requests = { - let config = finality_requests::Config::new(¶ms.protocol_id); - finality_requests::FinalityProofRequests::new(config, params.finality_proof_provider.clone()) - }; let light_client_handler = { let config = light_client_handler::Config::new(¶ms.protocol_id); light_client_handler::LightClientHandler::new( @@ -310,7 +330,6 @@ impl NetworkWorker { user_agent, local_public, block_requests, - finality_proof_requests, light_client_handler, discovery_config, params.network_config.request_response_protocols, @@ -338,7 +357,11 @@ impl NetworkWorker { transport::build_transport(local_identity, config_mem, config_wasm) }; let mut builder = SwarmBuilder::new(transport, behaviour, local_peer_id.clone()) - .peer_connection_limit(crate::MAX_CONNECTIONS_PER_PEER) + .connection_limits(ConnectionLimits::default() + .with_max_established_per_peer(Some(crate::MAX_CONNECTIONS_PER_PEER as u32)) + .with_max_established_incoming(Some(crate::MAX_CONNECTIONS_ESTABLISHED_INCOMING)) + ) + .substream_upgrade_protocol_override(upgrade::Version::V1Lazy) .notify_handler_buffer_size(NonZeroUsize::new(32).expect("32 != 0; qed")) .connection_event_buffer_size(1024); if let Some(spawner) = params.executor { @@ -374,7 +397,7 @@ impl NetworkWorker { // Add external addresses. 
for addr in ¶ms.network_config.public_addresses { - Swarm::::add_external_address(&mut swarm, addr.clone()); + Swarm::::add_external_address(&mut swarm, addr.clone(), AddressScore::Infinite); } let external_addresses = Arc::new(Mutex::new(Vec::new())); @@ -490,11 +513,9 @@ impl NetworkWorker { self.network_service.user_protocol_mut().on_block_finalized(hash, &header); } - /// This should be called when blocks are added to the - /// chain by something other than the import queue. - /// Currently this is only useful for tests. - pub fn update_chain(&mut self) { - self.network_service.user_protocol_mut().update_chain(); + /// Inform the network service about new best imported block. + pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { + self.network_service.user_protocol_mut().new_best_block_imported(hash, number); } /// Returns the local `PeerId`. @@ -559,10 +580,17 @@ impl NetworkWorker { .collect() }; + let peer_id = Swarm::::local_peer_id(&swarm).to_base58(); + let listened_addresses = Swarm::::listeners(&swarm).cloned().collect(); + let external_addresses = Swarm::::external_addresses(&swarm) + .map(|r| &r.addr) + .cloned() + .collect(); + NetworkState { - peer_id: Swarm::::local_peer_id(&swarm).to_base58(), - listened_addresses: Swarm::::listeners(&swarm).cloned().collect(), - external_addresses: Swarm::::external_addresses(&swarm).cloned().collect(), + peer_id, + listened_addresses, + external_addresses, connected_peers, not_connected_peers, peerset: swarm.user_protocol_mut().peerset_debug_info(), @@ -628,7 +656,7 @@ impl NetworkService { /// > between the remote voluntarily closing a substream or a network error /// > preventing the message from being delivered. /// - /// The protocol must have been registered with `register_notifications_protocol` or + /// The protocol must have been registered with /// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols). /// pub fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec) { @@ -636,13 +664,13 @@ impl NetworkService { // `peers_notifications_sinks` mutex as soon as possible. let sink = { let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { + if let Some(sink) = peers_notifications_sinks.get(&(target.clone(), protocol.clone())) { sink.clone() } else { // Notification silently discarded, as documented. - log::error!( + log::debug!( target: "sub-libp2p", - "Attempted to send notification on unknown protocol: {:?}", + "Attempted to send notification on missing or closed substream: {:?}", protocol, ); return; @@ -656,6 +684,14 @@ impl NetworkService { } // Sending is communicated to the `NotificationsSink`. + trace!( + target: "sub-libp2p", + "External API => Notification({:?}, {:?}, {} bytes)", + target, + protocol, + message.len() + ); + trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target); sink.send_sync_notification(protocol, message); } @@ -681,7 +717,7 @@ impl NetworkService { /// return an error. It is however possible for the entire connection to be abruptly closed, /// in which case enqueued notifications will be lost. /// - /// The protocol must have been registered with `register_notifications_protocol` or + /// The protocol must have been registered with /// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols). 
/// /// # Usage @@ -808,28 +844,6 @@ impl NetworkService { } } - /// Registers a new notifications protocol. - /// - /// After a protocol has been registered, you can call `write_notifications`. - /// - /// **Important**: This method is a work-around, and you are instead strongly encouraged to - /// pass the protocol in the `NetworkConfiguration::notifications_protocols` list instead. - /// If you have no other choice but to use this method, you are very strongly encouraged to - /// call it very early on. Any connection open will retain the protocols that were registered - /// then, and not any new one. - /// - /// Please call `event_stream` before registering a protocol, otherwise you may miss events - /// about the protocol that you have registered. - // TODO: remove this method after https://github.com/paritytech/substrate/issues/6827 - pub fn register_notifications_protocol( - &self, - protocol_name: impl Into>, - ) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RegisterNotifProtocol { - protocol_name: protocol_name.into(), - }); - } - /// You may call this when new transactons are imported by the transaction pool. /// /// All transactions will be fetched from the `TransactionPool` that was passed at @@ -1018,21 +1032,11 @@ impl NetworkService { self.num_connected.load(Ordering::Relaxed) } - /// This function should be called when blocks are added to the chain by something other - /// than the import queue. - /// - /// > **Important**: This function is a hack and can be removed at any time. Do **not** use it. - pub fn update_chain(&self) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::UpdateChain); - } - - /// Inform the network service about an own imported block. - pub fn own_block_imported(&self, hash: B::Hash, number: NumberFor) { + /// Inform the network service about new best imported block. + pub fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor) { let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::OwnBlockImported(hash, number)); + .unbounded_send(ServiceToWorkerMsg::NewBestBlockImported(hash, number)); } /// Utility function to extract `PeerId` from each `Multiaddr` for priority group updates. @@ -1121,6 +1125,7 @@ impl NotificationSender { Ok(r) => r, Err(()) => return Err(NotificationSenderError::Closed), }, + peer_id: self.sink.peer_id(), notification_size_metric: self.notification_size_metric.clone(), }) } @@ -1131,6 +1136,9 @@ impl NotificationSender { pub struct NotificationSenderReady<'a> { ready: Ready<'a>, + /// Target of the notification. + peer_id: &'a PeerId, + /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. notification_size_metric: Option, @@ -1145,6 +1153,15 @@ impl<'a> NotificationSenderReady<'a> { notification_size_metric.observe(notification.len() as f64); } + trace!( + target: "sub-libp2p", + "External API => Notification({:?}, {:?}, {} bytes)", + self.peer_id, + self.ready.protocol_name(), + notification.len() + ); + trace!(target: "sub-libp2p", "Handler({:?}) <= Async notification", self.peer_id); + self.ready .send(notification) .map_err(|()| NotificationSenderError::Closed) @@ -1183,12 +1200,8 @@ enum ServiceToWorkerMsg { request: Vec, pending_response: oneshot::Sender, RequestFailure>>, }, - RegisterNotifProtocol { - protocol_name: Cow<'static, str>, - }, DisconnectPeer(PeerId), - UpdateChain, - OwnBlockImported(B::Hash, NumberFor), + NewBestBlockImported(B::Hash, NumberFor), } /// Main network worker. 
Must be polled in order for the network to advance. @@ -1321,14 +1334,10 @@ impl Future for NetworkWorker { }, } }, - ServiceToWorkerMsg::RegisterNotifProtocol { protocol_name } => - this.network_service.register_notifications_protocol(protocol_name), ServiceToWorkerMsg::DisconnectPeer(who) => this.network_service.user_protocol_mut().disconnect_peer(&who), - ServiceToWorkerMsg::UpdateChain => - this.network_service.user_protocol_mut().update_chain(), - ServiceToWorkerMsg::OwnBlockImported(hash, number) => - this.network_service.user_protocol_mut().own_block_imported(hash, number), + ServiceToWorkerMsg::NewBestBlockImported(hash, number) => + this.network_service.user_protocol_mut().new_best_block_imported(hash, number), } } @@ -1361,28 +1370,24 @@ impl Future for NetworkWorker { } this.import_queue.import_justification(origin, hash, nb, justification); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::FinalityProofImport(origin, hash, nb, proof))) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.import_queue_finality_proofs_submitted.inc(); - } - this.import_queue.import_finality_proof(origin, hash, nb, proof); - }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, .. })) => { if let Some(metrics) = this.metrics.as_ref() { match result { - Ok(serve_time) => { + Ok(Some(serve_time)) => { metrics.requests_in_success_total .with_label_values(&[&protocol]) .observe(serve_time.as_secs_f64()); } + // Response time tracking is happening on a best-effort basis. Ignore + // the event in case response time could not be provided. + Ok(None) => {}, Err(err) => { let reason = match err { - ResponseFailure::Busy => "busy", ResponseFailure::Network(InboundFailure::Timeout) => "timeout", ResponseFailure::Network(InboundFailure::UnsupportedProtocols) => "unsupported", - ResponseFailure::Network(InboundFailure::ConnectionClosed) => - "connection-closed", + ResponseFailure::Network(InboundFailure::ResponseOmission) => + "busy-omitted", }; metrics.requests_in_failure_total @@ -1563,11 +1568,11 @@ impl Future for NetworkWorker { let reason = match cause { Some(ConnectionError::IO(_)) => "transport-error", Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::A(EitherError::A(EitherError::B( - EitherError::A(PingFailure::Timeout)))))))))) => "ping-timeout", + EitherError::A(EitherError::A(EitherError::B( + EitherError::A(PingFailure::Timeout))))))))) => "ping-timeout", Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::A(EitherError::A(EitherError::A( - NotifsHandlerError::SyncNotificationsClogged))))))))) => "sync-notifications-clogged", + EitherError::A(EitherError::A(EitherError::A( + NotifsHandlerError::SyncNotificationsClogged)))))))) => "sync-notifications-clogged", Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(_))) => "protocol-error", Some(ConnectionError::Handler(NodeHandlerWrapperError::KeepAliveTimeout)) => "keep-alive-timeout", None => "actively-closed", @@ -1687,7 +1692,10 @@ impl Future for NetworkWorker { // Update the variables shared with the `NetworkService`. 
this.num_connected.store(num_connected_peers, Ordering::Relaxed); { - let external_addresses = Swarm::::external_addresses(&this.network_service).cloned().collect(); + let external_addresses = Swarm::::external_addresses(&this.network_service) + .map(|r| &r.addr) + .cloned() + .collect(); *this.external_addresses.lock() = external_addresses; } @@ -1714,7 +1722,9 @@ impl Future for NetworkWorker { } metrics.peerset_num_discovered.set(this.network_service.user_protocol().num_discovered_peers() as u64); metrics.peerset_num_requested.set(this.network_service.user_protocol().requested_peers().count() as u64); - metrics.pending_connections.set(Swarm::network_info(&this.network_service).num_connections_pending as u64); + metrics.pending_connections.set( + Swarm::network_info(&this.network_service).connection_counters().num_pending() as u64 + ); } Poll::Pending @@ -1752,23 +1762,6 @@ impl<'a, B: BlockT, H: ExHashT> Link for NetworkLink<'a, B, H> { fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { self.protocol.user_protocol_mut().request_justification(hash, number) } - fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { - self.protocol.user_protocol_mut().request_finality_proof(hash, number) - } - fn finality_proof_imported( - &mut self, - who: PeerId, - request_block: (B::Hash, NumberFor), - finalization_result: Result<(B::Hash, NumberFor), ()>, - ) { - let success = finalization_result.is_ok(); - self.protocol.user_protocol_mut().finality_proof_import_result(request_block, finalization_result); - if !success { - info!("💔 Invalid finality proof provided by {} for #{}", who, request_block.0); - self.protocol.user_protocol_mut().disconnect_peer(&who); - self.protocol.user_protocol_mut().report_peer(who, ReputationChange::new_fatal("Invalid finality proof")); - } - } } fn ensure_addresses_consistent_with_transport<'a>( diff --git a/client/network/src/service/metrics.rs b/client/network/src/service/metrics.rs index a63ce7a18a519d1a816eb08b12ecf70a64fef9ab..614c24b522de2902ed01c79b6cff25801ccec24c 100644 --- a/client/network/src/service/metrics.rs +++ b/client/network/src/service/metrics.rs @@ -56,7 +56,6 @@ pub struct Metrics { pub distinct_peers_connections_closed_total: Counter, pub distinct_peers_connections_opened_total: Counter, pub import_queue_blocks_submitted: Counter, - pub import_queue_finality_proofs_submitted: Counter, pub import_queue_justifications_submitted: Counter, pub incoming_connections_errors_total: CounterVec, pub incoming_connections_total: Counter, @@ -112,10 +111,6 @@ impl Metrics { "import_queue_blocks_submitted", "Number of blocks submitted to the import queue.", )?, registry)?, - import_queue_finality_proofs_submitted: prometheus::register(Counter::new( - "import_queue_finality_proofs_submitted", - "Number of finality proofs submitted to the import queue.", - )?, registry)?, import_queue_justifications_submitted: prometheus::register(Counter::new( "import_queue_justifications_submitted", "Number of justifications submitted to the import queue.", diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 76a924748ad2a62270dc6ceb1da50401ab6c9ab5..225a3ae98ab5d9f0e35bbd8f69c8baa7532317cf 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -87,7 +87,6 @@ fn build_test_full_node(config: config::NetworkConfiguration) PassThroughVerifier(false), Box::new(client.clone()), None, - None, &sp_core::testing::TaskExecutor::new(), None, )); @@ -97,8 +96,6 @@ fn 
build_test_full_node(config: config::NetworkConfiguration) executor: None, network_config: config, chain: client.clone(), - finality_proof_provider: None, - finality_proof_request_builder: None, on_demand: None, transaction_pool: Arc::new(crate::config::EmptyTransactionPool), protocol_id: config::ProtocolId::from("/test-protocol-name"), diff --git a/client/network/src/transport.rs b/client/network/src/transport.rs index 035b3a9716a0221410098d5ad88771032f6ff395..4bf252d57978e4977c71496a7903270c75b05bed 100644 --- a/client/network/src/transport.rs +++ b/client/network/src/transport.rs @@ -17,9 +17,9 @@ // along with this program. If not, see . use libp2p::{ - InboundUpgradeExt, OutboundUpgradeExt, PeerId, Transport, + PeerId, Transport, core::{ - self, either::{EitherOutput, EitherTransport}, muxing::StreamMuxerBox, + self, either::EitherTransport, muxing::StreamMuxerBox, transport::{Boxed, OptionalTransport}, upgrade }, mplex, identity, bandwidth, wasm_ext, noise @@ -74,11 +74,7 @@ pub fn build_transport( // For more information about these two panics, see in "On the Importance of // Checking Cryptographic Protocols for Faults" by Dan Boneh, Richard A. DeMillo, // and Richard J. Lipton. - let noise_keypair_legacy = noise::Keypair::::new().into_authentic(&keypair) - .expect("can only fail in case of a hardware bug; since this signing is performed only \ - once and at initialization, we're taking the bet that the inconvenience of a very \ - rare panic here is basically zero"); - let noise_keypair_spec = noise::Keypair::::new().into_authentic(&keypair) + let noise_keypair = noise::Keypair::::new().into_authentic(&keypair) .expect("can only fail in case of a hardware bug; since this signing is performed only \ once and at initialization, we're taking the bet that the inconvenience of a very \ rare panic here is basically zero"); @@ -87,19 +83,9 @@ pub fn build_transport( let mut noise_legacy = noise::LegacyConfig::default(); noise_legacy.recv_legacy_handshake = true; - let mut xx_config = noise::NoiseConfig::xx(noise_keypair_spec); + let mut xx_config = noise::NoiseConfig::xx(noise_keypair); xx_config.set_legacy_config(noise_legacy.clone()); - let mut ix_config = noise::NoiseConfig::ix(noise_keypair_legacy); - ix_config.set_legacy_config(noise_legacy); - - let extract_peer_id = |result| match result { - EitherOutput::First((peer_id, o)) => (peer_id, EitherOutput::First(o)), - EitherOutput::Second((peer_id, o)) => (peer_id, EitherOutput::Second(o)), - }; - - core::upgrade::SelectUpgrade::new(xx_config.into_authenticated(), ix_config.into_authenticated()) - .map_inbound(extract_peer_id) - .map_outbound(extract_peer_id) + xx_config.into_authenticated() }; let multiplexing_config = { @@ -115,7 +101,7 @@ pub fn build_transport( core::upgrade::SelectUpgrade::new(yamux_config, mplex_config) }; - let transport = transport.upgrade(upgrade::Version::V1) + let transport = transport.upgrade(upgrade::Version::V1Lazy) .authenticate(authentication_config) .multiplex(multiplexing_config) .timeout(Duration::from_secs(20)) diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index a74aa90d4f4ce9ad63979f9d50e218ca4e9fdcf6..880e2c1f04ed685b8472b2dff997cf3b5cf9e7df 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -19,7 +19,7 @@ parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" rand = "0.7.2" -libp2p = { version = "0.30.1", default-features = false } +libp2p = { version = "0.32.2", default-features = false } sp-consensus = { version = "0.8.0", 
path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } sc-client-api = { version = "2.0.0", path = "../../api" } diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index 1d2cd3d687de92f5f90a4c8e8d729b1e9460e5b9..a5d0600abefeaaa4d10f757d8ee6b193320c6063 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -107,7 +107,6 @@ fn async_import_queue_drops() { verifier, Box::new(substrate_test_runtime_client::new()), None, - None, &executor, None, ); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 1aec3dae22b920d1b523bc6a76b0ee920d85ef4e..a70ecb4fb0484d1b9d6972b74743c30e570b1844 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -29,7 +29,6 @@ use std::{ use libp2p::build_multiaddr; use log::trace; -use sc_network::config::FinalityProofProvider; use sp_blockchain::{ HeaderBackend, Result as ClientResult, well_known_cache_keys::{self, Id as CacheKeyId}, @@ -44,14 +43,14 @@ use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_network::config::Role; use sp_consensus::block_validation::{DefaultBlockAnnounceValidator, BlockAnnounceValidator}; use sp_consensus::import_queue::{ - BasicQueue, BoxJustificationImport, Verifier, BoxFinalityProofImport, + BasicQueue, BoxJustificationImport, Verifier, }; use sp_consensus::block_import::{BlockImport, ImportResult}; use sp_consensus::Error as ConsensusError; use sp_consensus::{BlockOrigin, ForkChoiceStrategy, BlockImportParams, BlockCheckParams, JustificationImport}; use futures::prelude::*; use sc_network::{NetworkWorker, NetworkService, config::ProtocolId}; -use sc_network::config::{NetworkConfiguration, TransportConfig, BoxFinalityProofRequestBuilder}; +use sc_network::config::{NetworkConfiguration, TransportConfig}; use libp2p::PeerId; use parking_lot::Mutex; use sp_core::H256; @@ -280,7 +279,7 @@ impl Peer { where F: FnMut(BlockBuilder) -> Block { let best_hash = self.client.info().best_hash; - self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block, false) + self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block, false, true) } /// Add blocks to the peer -- edit the block before adding. The chain will @@ -292,6 +291,7 @@ impl Peer { origin: BlockOrigin, mut edit_block: F, headers_only: bool, + inform_sync_about_new_best_block: bool, ) -> H256 where F: FnMut(BlockBuilder) -> Block { let full_client = self.client.as_full() .expect("blocks could only be generated by full clients"); @@ -329,7 +329,12 @@ impl Peer { at = hash; } - self.network.update_chain(); + if inform_sync_about_new_best_block { + self.network.new_best_block_imported( + at, + full_client.header(&BlockId::Hash(at)).ok().flatten().unwrap().number().clone(), + ); + } self.network.service().announce_block(at.clone(), Vec::new()); at } @@ -343,18 +348,36 @@ impl Peer { /// Push blocks to the peer (simplified: with or without a TX) pub fn push_headers(&mut self, count: usize) -> H256 { let best_hash = self.client.info().best_hash; - self.generate_tx_blocks_at(BlockId::Hash(best_hash), count, false, true) + self.generate_tx_blocks_at(BlockId::Hash(best_hash), count, false, true, true) } /// Push blocks to the peer (simplified: with or without a TX) starting from /// given hash. 
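// A minimal sketch of the pattern the test helpers above rely on, using a stand-in
// trait that mirrors the two calls made there: blocks imported outside of sync must
// be reported via `new_best_block_imported`, otherwise the handshake keeps
// advertising the old best block and other peers will not sync from this node.
trait NetworkHandle<Hash: Copy, Number> {
	fn new_best_block_imported(&self, hash: Hash, number: Number);
	fn announce_block(&self, hash: Hash, associated_data: Vec<u8>);
}

fn after_manual_import<H: Copy, N>(network: &impl NetworkHandle<H, N>, best_hash: H, best_number: N) {
	// Update the sync handshake first, then announce the new block to peers.
	network.new_best_block_imported(best_hash, best_number);
	network.announce_block(best_hash, Vec::new());
}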
pub fn push_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool) -> H256 { - self.generate_tx_blocks_at(at, count, with_tx, false) + self.generate_tx_blocks_at(at, count, with_tx, false, true) + } + + /// Push blocks to the peer (simplified: with or without a TX) starting from + /// given hash without informing the sync protocol about the new best block. + pub fn push_blocks_at_without_informing_sync( + &mut self, + at: BlockId, + count: usize, + with_tx: bool, + ) -> H256 { + self.generate_tx_blocks_at(at, count, with_tx, false, false) } /// Push blocks/headers to the peer (simplified: with or without a TX) starting from /// given hash. - fn generate_tx_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool, headers_only:bool) -> H256 { + fn generate_tx_blocks_at( + &mut self, + at: BlockId, + count: usize, + with_tx: bool, + headers_only: bool, + inform_sync_about_new_best_block: bool, + ) -> H256 { let mut nonce = 0; if with_tx { self.generate_blocks_at( @@ -371,7 +394,8 @@ impl Peer { nonce = nonce + 1; builder.build().unwrap().block }, - headers_only + headers_only, + inform_sync_about_new_best_block, ) } else { self.generate_blocks_at( @@ -380,6 +404,7 @@ impl Peer { BlockOrigin::File, |builder| builder.build().unwrap().block, headers_only, + inform_sync_about_new_best_block, ) } } @@ -586,20 +611,10 @@ pub trait TestNetFactory: Sized { -> ( BlockImportAdapter, Option>, - Option>, - Option>, Self::PeerData, ) { - (client.as_block_import(), None, None, None, Default::default()) - } - - /// Get finality proof provider (if supported). - fn make_finality_proof_provider( - &self, - _client: PeersClient, - ) -> Option>> { - None + (client.as_block_import(), None, Default::default()) } fn default_config() -> ProtocolConfig { @@ -636,8 +651,6 @@ pub trait TestNetFactory: Sized { let ( block_import, justification_import, - finality_proof_import, - finality_proof_request_builder, data, ) = self.make_block_import(PeersClient::Full(client.clone(), backend.clone())); @@ -652,7 +665,6 @@ pub trait TestNetFactory: Sized { verifier.clone(), Box::new(block_import.clone()), justification_import, - finality_proof_import, &sp_core::testing::TaskExecutor::new(), None, )); @@ -675,10 +687,6 @@ pub trait TestNetFactory: Sized { executor: None, network_config, chain: client.clone(), - finality_proof_provider: self.make_finality_proof_provider( - PeersClient::Full(client.clone(), backend.clone()), - ), - finality_proof_request_builder, on_demand: None, transaction_pool: Arc::new(EmptyTransactionPool), protocol_id: ProtocolId::from("test-protocol-name"), @@ -717,8 +725,6 @@ pub trait TestNetFactory: Sized { let ( block_import, justification_import, - finality_proof_import, - finality_proof_request_builder, data, ) = self.make_block_import(PeersClient::Light(client.clone(), backend.clone())); @@ -733,7 +739,6 @@ pub trait TestNetFactory: Sized { verifier.clone(), Box::new(block_import.clone()), justification_import, - finality_proof_import, &sp_core::testing::TaskExecutor::new(), None, )); @@ -755,10 +760,6 @@ pub trait TestNetFactory: Sized { executor: None, network_config, chain: client.clone(), - finality_proof_provider: self.make_finality_proof_provider( - PeersClient::Light(client.clone(), backend.clone()) - ), - finality_proof_request_builder, on_demand: None, transaction_pool: Arc::new(EmptyTransactionPool), protocol_id: ProtocolId::from("test-protocol-name"), @@ -989,16 +990,12 @@ impl TestNetFactory for JustificationTestNet { -> ( BlockImportAdapter, Option>, - Option>, - Option>, 
Self::PeerData, ) { ( client.as_block_import(), Some(Box::new(ForceFinalized(client))), - None, - None, Default::default(), ) } diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 64985871d85e0737ca16a755381310b94a0357d1..9a488ae4fa49c32b2e3ec62f8e43b97a5eafb8a3 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -779,3 +779,38 @@ fn wait_until_deferred_block_announce_validation_is_ready() { net.block_until_idle(); } } + +/// When we don't inform the sync protocol about the best block, a node will not sync from us as the +/// handshake is not does not contain our best block. +#[test] +fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { + sp_tracing::try_init_simple(); + log::trace!(target: "sync", "Test"); + let mut net = TestNet::new(1); + + // Produce some blocks + let block_hash = net.peer(0).push_blocks_at_without_informing_sync(BlockId::Number(0), 3, true); + + // Add a node and wait until they are connected + net.add_full_peer_with_config(Default::default()); + net.block_until_connected(); + net.block_until_idle(); + + // The peer should not have synced the block. + assert!(!net.peer(1).has_block(&block_hash)); + + // Make sync protocol aware of the best block + net.peer(0).network_service().new_best_block_imported(block_hash, 3); + net.block_until_idle(); + + // Connect another node that should now sync to the tip + net.add_full_peer_with_config(Default::default()); + net.block_until_connected(); + + while !net.peer(2).has_block(&block_hash) { + net.block_until_idle(); + } + + // However peer 1 should still not have the block. + assert!(!net.peer(1).has_block(&block_hash)); +} diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 5686d33da9b233eb874be1c05d88f36ca70a0f35..1a31d278eb53befefb3acbedb86a1f95efe2f025 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -33,7 +33,7 @@ sc-network = { version = "0.8.0", path = "../network" } sc-keystore = { version = "2.0.0", path = "../keystore" } [target.'cfg(not(target_os = "unknown"))'.dependencies] -hyper = "0.13.2" +hyper = "0.13.9" hyper-rustls = "0.21.0" [dev-dependencies] diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index efca00a24deb6c859289046b02d3fc00e10246e6..41e2033bccfc2532f557422807c569df272b454b 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.4" -libp2p = { version = "0.30.1", default-features = false } +libp2p = { version = "0.32.2", default-features = false } sp-utils = { version = "2.0.0", path = "../../primitives/utils"} log = "0.4.8" serde_json = "1.0.41" diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index 575743afa079c0f3c2ba2ddd1f0bf3bfdc195936..bb08bdc18e678d3955621ae62b2502978d8a2ab4 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -36,7 +36,7 @@ const BANNED_THRESHOLD: i32 = 82 * (i32::min_value() / 100); /// Reputation change for a node when we get disconnected from it. const DISCONNECT_REPUTATION_CHANGE: i32 = -256; /// Reserved peers group ID -const RESERVED_NODES: &'static str = "reserved"; +const RESERVED_NODES: &str = "reserved"; /// Amount of time between the moment we disconnect from a node and the moment we remove it from /// the list. const FORGET_AFTER: Duration = Duration::from_secs(3600); @@ -87,7 +87,7 @@ impl PeersetHandle { /// Has no effect if the node was already a reserved peer. 
/// /// > **Note**: Keep in mind that the networking has to know an address for this node, - /// > otherwise it will not be able to connect to it. + /// > otherwise it will not be able to connect to it. pub fn add_reserved_peer(&self, peer_id: PeerId) { let _ = self.tx.unbounded_send(Action::AddReservedPeer(peer_id)); } @@ -103,7 +103,7 @@ impl PeersetHandle { pub fn set_reserved_only(&self, reserved: bool) { let _ = self.tx.unbounded_send(Action::SetReservedOnly(reserved)); } - + /// Set reserved peers to the new set. pub fn set_reserved_peers(&self, peer_ids: HashSet) { let _ = self.tx.unbounded_send(Action::SetReservedPeers(peer_ids)); @@ -169,7 +169,7 @@ pub struct PeersetConfig { /// List of bootstrap nodes to initialize the peer with. /// /// > **Note**: Keep in mind that the networking has to know an address for these nodes, - /// > otherwise it will not be able to connect to them. + /// > otherwise it will not be able to connect to them. pub bootnodes: Vec, /// If true, we only accept nodes in [`PeersetConfig::priority_groups`]. @@ -178,7 +178,7 @@ pub struct PeersetConfig { /// Lists of nodes we should always be connected to. /// /// > **Note**: Keep in mind that the networking has to know an address for these nodes, - /// > otherwise it will not be able to connect to them. + /// > otherwise it will not be able to connect to them. pub priority_groups: Vec<(String, HashSet)>, } @@ -252,7 +252,7 @@ impl Peerset { fn on_remove_reserved_peer(&mut self, peer_id: PeerId) { self.on_remove_from_priority_group(RESERVED_NODES, peer_id); } - + fn on_set_reserved_peers(&mut self, peer_ids: HashSet) { self.on_set_priority_group(RESERVED_NODES, peer_ids); } @@ -357,8 +357,18 @@ impl Peerset { ); } }, - peersstate::Peer::NotConnected(mut peer) => peer.add_reputation(change.value), - peersstate::Peer::Unknown(peer) => peer.discover().add_reputation(change.value), + peersstate::Peer::NotConnected(mut peer) => { + trace!(target: "peerset", "Report {}: {:+} to {}. Reason: {}", + peer_id, change.value, peer.reputation(), change.reason + ); + peer.add_reputation(change.value) + }, + peersstate::Peer::Unknown(peer) => { + trace!(target: "peerset", "Discover {}: {:+}. Reason: {}", + peer_id, change.value, change.reason + ); + peer.discover().add_reputation(change.value) + }, } } @@ -430,10 +440,9 @@ impl Peerset { .get(RESERVED_NODES) .into_iter() .flatten() - .filter(move |n| { + .find(move |n| { data.peer(n).into_connected().is_none() }) - .next() .cloned() }; @@ -469,10 +478,9 @@ impl Peerset { self.priority_groups .values() .flatten() - .filter(move |n| { + .find(move |n| { data.peer(n).into_connected().is_none() }) - .next() .cloned() }; @@ -497,21 +505,17 @@ impl Peerset { } // Now, we try to connect to non-priority nodes. - loop { - // Try to grab the next node to attempt to connect to. - let next = match self.data.highest_not_connected_peer() { - Some(p) => p, - None => break, // No known node to add. - }; - + while let Some(next) = self.data.highest_not_connected_peer() { // Don't connect to nodes with an abysmal reputation. if next.reputation() < BANNED_THRESHOLD { break; } match next.try_outgoing() { - Ok(conn) => self.message_queue.push_back(Message::Connect(conn.into_peer_id())), - Err(_) => break, // No more slots available. + Ok(conn) => self + .message_queue + .push_back(Message::Connect(conn.into_peer_id())), + Err(_) => break, // No more slots available. 
} } } @@ -530,11 +534,9 @@ impl Peerset { trace!(target: "peerset", "Incoming {:?}", peer_id); self.update_time(); - if self.reserved_only { - if !self.priority_groups.get(RESERVED_NODES).map_or(false, |n| n.contains(&peer_id)) { - self.message_queue.push_back(Message::Reject(index)); - return; - } + if self.reserved_only && !self.priority_groups.get(RESERVED_NODES).map_or(false, |n| n.contains(&peer_id)) { + self.message_queue.push_back(Message::Reject(index)); + return; } let not_connected = match self.data.peer(&peer_id) { @@ -563,8 +565,6 @@ impl Peerset { /// Must only be called after the PSM has either generated a `Connect` message with this /// `PeerId`, or accepted an incoming connection with this `PeerId`. pub fn dropped(&mut self, peer_id: PeerId) { - trace!(target: "peerset", "Dropping {:?}", peer_id); - // We want reputations to be up-to-date before adjusting them. self.update_time(); @@ -572,6 +572,8 @@ impl Peerset { peersstate::Peer::Connected(mut entry) => { // Decrease the node's reputation so that we don't try it again and again and again. entry.add_reputation(DISCONNECT_REPUTATION_CHANGE); + trace!(target: "peerset", "Dropping {}: {:+} to {}", + peer_id, DISCONNECT_REPUTATION_CHANGE, entry.reputation()); entry.disconnect(); } peersstate::Peer::NotConnected(_) | peersstate::Peer::Unknown(_) => @@ -584,7 +586,7 @@ impl Peerset { /// Adds discovered peer ids to the PSM. /// /// > **Note**: There is no equivalent "expired" message, meaning that it is the responsibility - /// > of the PSM to remove `PeerId`s that fail to dial too often. + /// > of the PSM to remove `PeerId`s that fail to dial too often. pub fn discovered>(&mut self, peer_ids: I) { let mut discovered_any = false; @@ -747,12 +749,12 @@ mod tests { let (mut peerset, _handle) = Peerset::from_config(config); peerset.incoming(incoming.clone(), ii); - peerset.incoming(incoming.clone(), ii4); - peerset.incoming(incoming2.clone(), ii2); - peerset.incoming(incoming3.clone(), ii3); + peerset.incoming(incoming, ii4); + peerset.incoming(incoming2, ii2); + peerset.incoming(incoming3, ii3); assert_messages(peerset, vec![ - Message::Connect(bootnode.clone()), + Message::Connect(bootnode), Message::Accept(ii), Message::Accept(ii2), Message::Reject(ii3), @@ -772,7 +774,7 @@ mod tests { }; let (mut peerset, _) = Peerset::from_config(config); - peerset.incoming(incoming.clone(), ii); + peerset.incoming(incoming, ii); assert_messages(peerset, vec![ Message::Reject(ii), diff --git a/client/peerset/src/peersstate.rs b/client/peerset/src/peersstate.rs index 59879f629e31ecdbfbcb224a8a7dfb437f150746..19b2489eff486d7aae042c66771d73733981a3b9 100644 --- a/client/peerset/src/peersstate.rs +++ b/client/peerset/src/peersstate.rs @@ -42,8 +42,8 @@ pub struct PeersState { /// List of nodes that we know about. /// /// > **Note**: This list should really be ordered by decreasing reputation, so that we can - /// easily select the best node to connect to. As a first draft, however, we don't - /// sort, to make the logic easier. + /// easily select the best node to connect to. As a first draft, however, we don't + /// sort, to make the logic easier. nodes: HashMap, /// Number of slot-occupying nodes for which the `ConnectionState` is `In`. @@ -130,7 +130,7 @@ impl PeersState { /// Returns an object that grants access to the state of a peer. 
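// A small sketch of the disconnect bookkeeping above, with stand-in types: when a
// connected peer drops, a fixed reputation penalty is applied (and now traced)
// before the entry is marked disconnected, so the slot allocator does not
// immediately re-dial the same node.
const DISCONNECT_PENALTY: i32 = -256;

struct PeerEntry {
	reputation: i32,
	connected: bool,
}

fn on_peer_dropped(entry: &mut PeerEntry) {
	entry.reputation = entry.reputation.saturating_add(DISCONNECT_PENALTY);
	entry.connected = false;
}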
pub fn peer<'a>(&'a mut self, peer_id: &'a PeerId) -> Peer<'a> { match self.nodes.get_mut(peer_id) { - None => return Peer::Unknown(UnknownPeer { + None => Peer::Unknown(UnknownPeer { parent: self, peer_id: Cow::Borrowed(peer_id), }), @@ -585,7 +585,7 @@ mod tests { peers_state.peer(&id2).into_connected().unwrap().disconnect(); assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id1.clone())); peers_state.peer(&id1).into_not_connected().unwrap().set_reputation(-100); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id2.clone())); + assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id2)); } #[test] diff --git a/client/peerset/tests/fuzz.rs b/client/peerset/tests/fuzz.rs index 6fa29e3d834cfcdaa0b035e03849432e20e7b3f3..e02742fc40ad4427cdbf037468d05b9f0e63f9ba 100644 --- a/client/peerset/tests/fuzz.rs +++ b/client/peerset/tests/fuzz.rs @@ -115,8 +115,8 @@ fn test_once() { 4 => if let Some(id) = known_nodes.iter() .filter(|n| incoming_nodes.values().all(|m| m != *n) && !connected_nodes.contains(*n)) .choose(&mut rng) { - peerset.incoming(id.clone(), next_incoming_id.clone()); - incoming_nodes.insert(next_incoming_id.clone(), id.clone()); + peerset.incoming(id.clone(), next_incoming_id); + incoming_nodes.insert(next_incoming_id, id.clone()); next_incoming_id.0 += 1; } diff --git a/client/rpc-api/src/system/mod.rs b/client/rpc-api/src/system/mod.rs index fbeec23ea50856068f65cfdb5e2aadbd33b0b3f0..f05f1fada901e06da983ed2cc601642d16bcab52 100644 --- a/client/rpc-api/src/system/mod.rs +++ b/client/rpc-api/src/system/mod.rs @@ -108,4 +108,18 @@ pub trait SystemApi { /// known block. #[rpc(name = "system_syncState", returns = "SyncState")] fn system_sync_state(&self) -> Receiver>; + + /// Adds the supplied directives to the current log filter + /// + /// The syntax is identical to the CLI `=`: + /// + /// `sync=debug,state=trace` + #[rpc(name = "system_addLogFilter", returns = "()")] + fn system_add_log_filter(&self, directives: String) + -> Result<(), jsonrpc_core::Error>; + + /// Resets the log filter to Substrate defaults + #[rpc(name = "system_resetLogFilter", returns = "()")] + fn system_reset_log_filter(&self) + -> Result<(), jsonrpc_core::Error>; } diff --git a/client/rpc-servers/src/middleware.rs b/client/rpc-servers/src/middleware.rs index 74139714c8cb7eded05cdc7644e0f1a26f35e46b..233ceab3cf8a6536cdd97ab911dacf72c01e8efd 100644 --- a/client/rpc-servers/src/middleware.rs +++ b/client/rpc-servers/src/middleware.rs @@ -32,36 +32,41 @@ use futures::{future::Either, Future}; /// Metrics for RPC middleware #[derive(Debug, Clone)] pub struct RpcMetrics { - rpc_calls: CounterVec, + rpc_calls: Option>, } impl RpcMetrics { /// Create an instance of metrics pub fn new(metrics_registry: Option<&Registry>) -> Result { - metrics_registry.and_then(|r| { - Some(RpcMetrics { - rpc_calls: register(CounterVec::new( - Opts::new( - "rpc_calls_total", - "Number of rpc calls received", - ), - &["protocol"] - ).ok()?, r).ok()?, - }) - }).ok_or(PrometheusError::Msg("Cannot register metric".to_string())) + Ok(Self { + rpc_calls: metrics_registry.map(|r| + register( + CounterVec::new( + Opts::new( + "rpc_calls_total", + "Number of rpc calls received", + ), + &["protocol"] + )?, + r, + ) + ).transpose()?, + }) } } /// Middleware for RPC calls pub struct RpcMiddleware { - metrics: Option, + metrics: RpcMetrics, transport_label: String, } impl RpcMiddleware { - /// Create an instance of middleware with provided metrics - 
/// transport_label is used as a label for Prometheus collector - pub fn new(metrics: Option, transport_label: &str) -> Self { + /// Create an instance of middleware. + /// + /// - `metrics`: Will be used to report statistics. + /// - `transport_label`: The label that is used when reporting the statistics. + pub fn new(metrics: RpcMetrics, transport_label: &str) -> Self { RpcMiddleware { metrics, transport_label: String::from(transport_label), @@ -78,8 +83,8 @@ impl RequestMiddleware for RpcMiddleware { F: Fn(Request, M) -> X + Send + Sync, X: Future, Error = ()> + Send + 'static, { - if let Some(ref metrics) = self.metrics { - metrics.rpc_calls.with_label_values(&[self.transport_label.as_str()]).inc(); + if let Some(ref rpc_calls) = self.metrics.rpc_calls { + rpc_calls.with_label_values(&[self.transport_label.as_str()]).inc(); } Either::B(next(request, meta)) diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 0af880f4330bf6ad6e82aa6565da7d6e07d40c8b..e68ac6e4e918fa3ee5978e055eaf6bff776d1b08 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -37,6 +37,7 @@ sc-block-builder = { version = "0.8.0", path = "../../client/block-builder" } sc-keystore = { version = "2.0.0", path = "../keystore" } sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sc-tracing = { version = "2.0.0", path = "../../client/tracing" } hash-db = { version = "0.15.2", default-features = false } parking_lot = "0.10.0" lazy_static = { version = "1.4.0", optional = true } @@ -50,6 +51,7 @@ sp-io = { version = "2.0.0", path = "../../primitives/io" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } tokio = "0.1.22" sc-transaction-pool = { version = "2.0.0", path = "../transaction-pool" } +sc-cli = { version = "0.8.0", path = "../cli" } [features] test-helpers = ["lazy_static"] diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index 1db90e209d0d65206c10ac6d58778529c0762475..1a2d84e4e57272771a8679f5577aa120295b92d2 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -215,7 +215,7 @@ impl AuthorApi, BlockHash

> for Author Ok(watcher) => { subscriptions.add(subscriber, move |sink| { sink - .sink_map_err(|_| unimplemented!()) + .sink_map_err(|e| log::debug!("Subscription sink failed: {:?}", e)) .send_all(Compat::new(watcher)) .map(|_| ()) }); diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index fda73cea271103c6b797293750459f3c9e0b952e..a1b9fbc4eebc579c7c47e3e7e65bc91b9f764aef 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -541,7 +541,7 @@ impl ChildStateBackend for FullState ChildInfo::new_default(storage_key), - None => return Err("Invalid child storage key".into()), + None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage_keys( &BlockId::Hash(block), @@ -563,7 +563,7 @@ impl ChildStateBackend for FullState ChildInfo::new_default(storage_key), - None => return Err("Invalid child storage key".into()), + None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage( &BlockId::Hash(block), @@ -585,7 +585,7 @@ impl ChildStateBackend for FullState ChildInfo::new_default(storage_key), - None => return Err("Invalid child storage key".into()), + None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage_hash( &BlockId::Hash(block), diff --git a/client/rpc/src/system/mod.rs b/client/rpc/src/system/mod.rs index 17fb6b77a5710378d0dab992a7d364fedba21800..f1ebf5f702a27a3c1ec5ca1e10e5845fce37777c 100644 --- a/client/rpc/src/system/mod.rs +++ b/client/rpc/src/system/mod.rs @@ -197,4 +197,15 @@ impl SystemApi::Number> for Sy let _ = self.send_back.unbounded_send(Request::SyncState(tx)); Receiver(Compat::new(rx)) } + + fn system_add_log_filter(&self, directives: String) -> std::result::Result<(), rpc::Error> { + self.deny_unsafe.check_if_safe()?; + sc_tracing::add_directives(&directives); + sc_tracing::reload_filter().map_err(|_e| rpc::Error::internal_error()) + } + + fn system_reset_log_filter(&self)-> std::result::Result<(), rpc::Error> { + self.deny_unsafe.check_if_safe()?; + sc_tracing::reset_log_filter().map_err(|_e| rpc::Error::internal_error()) + } } diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index 61f1940dc2010cc25ab19e8d2565877e349749d1..fa3574e9dae029b1eb89d446829547634a775fa8 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -24,7 +24,10 @@ use substrate_test_runtime_client::runtime::Block; use assert_matches::assert_matches; use futures::prelude::*; use sp_utils::mpsc::tracing_unbounded; -use std::thread; +use std::{ + process::{Stdio, Command}, env, io::{BufReader, BufRead, Write}, + sync::{Arc, Mutex}, thread, time::Duration +}; struct Status { pub peers: usize, @@ -333,3 +336,81 @@ fn system_network_remove_reserved() { assert_eq!(runtime.block_on(good_fut), Ok(())); assert!(runtime.block_on(bad_fut).is_err()); } + +#[test] +fn test_add_reset_log_filter() { + const EXPECTED_BEFORE_ADD: &'static str = "EXPECTED_BEFORE_ADD"; + const EXPECTED_AFTER_ADD: &'static str = "EXPECTED_AFTER_ADD"; + + // Enter log generation / filter reload + if std::env::var("TEST_LOG_FILTER").is_ok() { + sc_cli::init_logger("test_before_add=debug", Default::default(), Default::default(), false).unwrap(); + for line in std::io::stdin().lock().lines() { + let line = line.expect("Failed to read bytes"); + if line.contains("add_reload") { + assert!(api(None).system_add_log_filter("test_after_add".to_owned()).is_ok(), "`system_add_log_filter` failed"); + } else if 
line.contains("reset") { + assert!(api(None).system_reset_log_filter().is_ok(), "`system_reset_log_filter` failed"); + } else if line.contains("exit") { + return; + } + log::debug!(target: "test_before_add", "{}", EXPECTED_BEFORE_ADD); + log::debug!(target: "test_after_add", "{}", EXPECTED_AFTER_ADD); + } + } + + // Call this test again to enter the log generation / filter reload block + let test_executable = env::current_exe().expect("Unable to get current executable!"); + let mut child_process = Command::new(test_executable) + .env("TEST_LOG_FILTER", "1") + .args(&["--nocapture", "test_add_reset_log_filter"]) + .stdin(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .unwrap(); + + let child_stderr = child_process.stderr.take().expect("Could not get child stderr"); + let mut child_out = BufReader::new(child_stderr); + let mut child_in = child_process.stdin.take().expect("Could not get child stdin"); + + let child_out_str = Arc::new(Mutex::new(String::new())); + let shared = child_out_str.clone(); + + let _handle = thread::spawn(move || { + let mut line = String::new(); + while let Ok(_) = child_out.read_line(&mut line) { + shared.lock().unwrap().push_str(&line); + line.clear(); + } + }); + + // Initiate logs loop in child process + child_in.write(b"\n").unwrap(); + thread::sleep(Duration::from_millis(100)); + let test1_str = child_out_str.lock().unwrap().clone(); + // Assert that only the first target is present + assert!(test1_str.contains(EXPECTED_BEFORE_ADD)); + assert!(!test1_str.contains(EXPECTED_AFTER_ADD)); + child_out_str.lock().unwrap().clear(); + + // Initiate add directive & reload in child process + child_in.write(b"add_reload\n").unwrap(); + thread::sleep(Duration::from_millis(100)); + let test2_str = child_out_str.lock().unwrap().clone(); + // Assert that both targets are now present + assert!(test2_str.contains(EXPECTED_BEFORE_ADD)); + assert!(test2_str.contains(EXPECTED_AFTER_ADD)); + child_out_str.lock().unwrap().clear(); + + // Initiate logs filter reset in child process + child_in.write(b"reset\n").unwrap(); + thread::sleep(Duration::from_millis(100)); + let test3_str = child_out_str.lock().unwrap().clone(); + // Assert that only the first target is present as it was initially + assert!(test3_str.contains(EXPECTED_BEFORE_ADD)); + assert!(!test3_str.contains(EXPECTED_AFTER_ADD)); + + // Return from child process + child_in.write(b"exit\n").unwrap(); + assert!(child_process.wait().expect("Error waiting for child process").success()); +} diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index b85ebde3c1d28f95dde46575137be46cad0b0967..4350e1a2bf2a90aef7d9e70d28910667aab6c0b3 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -24,7 +24,7 @@ wasmtime = [ test-helpers = [] [dependencies] -derive_more = "0.99.2" +thiserror = "1.0.21" futures01 = { package = "futures", version = "0.1.29" } futures = { version = "0.3.4", features = ["compat"] } jsonrpc-pubsub = "15.1" @@ -32,7 +32,7 @@ jsonrpc-core = "15.1" rand = "0.7.3" parking_lot = "0.10.0" lazy_static = "1.4.0" -log = "0.4.8" +log = "0.4.11" slog = { version = "2.5.2", features = ["nested-values"] } futures-timer = "3.0.1" wasm-timer = "0.2" @@ -76,13 +76,13 @@ sc-offchain = { version = "2.0.0", path = "../offchain" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} sc-tracing = { version = "2.0.0", path = "../tracing" } sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } -tracing = "0.1.19" 
+tracing = "0.1.22" tracing-futures = { version = "0.2.4" } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } [target.'cfg(not(target_os = "unknown"))'.dependencies] tempfile = "3.1.0" -directories = "2.0.2" +directories = "3.0.1" [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" }
diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 7d613f2bc6292687023a570300af63126c59b2f6..5e511d3d7c77fa1f6ce2d31f670ed221869f44f2 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs
@@ -41,7 +41,7 @@ use futures::{ }; use sc_keystore::LocalKeystore; use log::{info, warn}; -use sc_network::config::{Role, FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder}; +use sc_network::config::{Role, OnDemand}; use sc_network::NetworkService; use sp_runtime::generic::BlockId; use sp_runtime::traits::{
@@ -59,7 +59,7 @@ use sp_core::traits::{ CodeExecutor, SpawnNamed, }; -use sp_keystore::{CryptoStore, SyncCryptoStorePtr}; +use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::BuildStorage; use sc_client_api::{ BlockBackend, BlockchainEvents,
@@ -205,12 +205,25 @@ pub type TLightClientWithBackend = Client< TRtApi, >; -enum KeystoreContainerInner { - Local(Arc) +trait AsCryptoStoreRef { + fn keystore_ref(&self) -> Arc; + fn sync_keystore_ref(&self) -> Arc; +} + +impl AsCryptoStoreRef for Arc where T: CryptoStore + SyncCryptoStore + 'static { + fn keystore_ref(&self) -> Arc { + self.clone() + } + fn sync_keystore_ref(&self) -> Arc { + self.clone() + } } /// Construct and hold different layers of Keystore wrappers -pub struct KeystoreContainer(KeystoreContainerInner); +pub struct KeystoreContainer { + remote: Option>, + local: Arc, +} impl KeystoreContainer { /// Construct KeystoreContainer
@@ -223,20 +236,35 @@ impl KeystoreContainer { KeystoreConfig::InMemory => LocalKeystore::in_memory(), }); - Ok(Self(KeystoreContainerInner::Local(keystore))) + Ok(Self{remote: Default::default(), local: keystore}) + } + + /// Set the remote keystore. + /// Should be called right away at startup and not at runtime: + /// even though this overrides any previously set remote store, it + /// does not reset any references previously handed out - they will + /// stick around. + pub fn set_remote_keystore(&mut self, remote: Arc) + where T: CryptoStore + SyncCryptoStore + 'static + { + self.remote = Some(Box::new(remote)) } /// Returns an adapter to the asynchronous keystore that implements `CryptoStore` pub fn keystore(&self) -> Arc { - match self.0 { - KeystoreContainerInner::Local(ref keystore) => keystore.clone(), + if let Some(c) = self.remote.as_ref() { + c.keystore_ref() + } else { + self.local.clone() } } /// Returns the synchronous keystore wrapper pub fn sync_keystore(&self) -> SyncCryptoStorePtr { - match self.0 { - KeystoreContainerInner::Local(ref keystore) => keystore.clone() as SyncCryptoStorePtr, + if let Some(c) = self.remote.as_ref() { + c.sync_keystore_ref() + } else { + self.local.clone() as SyncCryptoStorePtr } }
@@ -249,9 +277,7 @@ impl KeystoreContainer { /// Using the [`LocalKeystore`] will result in losing the ability to use any other keystore implementation, like /// a remote keystore for example. Only use this if you are certain that you require it!
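// A minimal usage sketch for the keystore container above; `MyRemoteKeystore` and
// the surrounding bindings (`config`, `url`) are hypothetical, the only assumption
// being a type that implements both `CryptoStore` and `SyncCryptoStore`, which is
// the bound `set_remote_keystore` requires:
//
//     let mut keystores = KeystoreContainer::new(&config.keystore)?;
//     // Install the remote store before any handles are passed out; handles that
//     // were already handed out keep pointing at the previous store.
//     keystores.set_remote_keystore(Arc::new(MyRemoteKeystore::connect(&url)?));
//     let async_store = keystores.keystore();      // asynchronous `CryptoStore` handle
//     let sync_store = keystores.sync_keystore();  // `SyncCryptoStorePtr`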
pub fn local_keystore(&self) -> Option> { - match self.0 { - KeystoreContainerInner::Local(ref keystore) => Some(keystore.clone()), - } + Some(self.local.clone()) } } @@ -603,12 +629,12 @@ pub fn spawn_tasks( on_demand.clone(), remote_blockchain.clone(), &*rpc_extensions_builder, backend.offchain_storage(), system_rpc_tx.clone() ); - let rpc_metrics = sc_rpc_server::RpcMetrics::new(config.prometheus_registry()).ok(); - let rpc = start_rpc_servers(&config, gen_handler, rpc_metrics.as_ref())?; + let rpc_metrics = sc_rpc_server::RpcMetrics::new(config.prometheus_registry())?; + let rpc = start_rpc_servers(&config, gen_handler, rpc_metrics.clone())?; // This is used internally, so don't restrict access to unsafe RPC let rpc_handlers = RpcHandlers(Arc::new(gen_handler( sc_rpc::DenyUnsafe::No, - sc_rpc_server::RpcMiddleware::new(rpc_metrics.as_ref().cloned(), "inbrowser") + sc_rpc_server::RpcMiddleware::new(rpc_metrics, "inbrowser") ).into())); // Telemetry @@ -830,10 +856,6 @@ pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> { pub block_announce_validator_builder: Option) -> Box + Send> + Send >>, - /// An optional finality proof request builder. - pub finality_proof_request_builder: Option>, - /// An optional, shared finality proof request provider. - pub finality_proof_provider: Option>>, } /// Build the network service, the network status sinks and an RPC sender. @@ -858,7 +880,7 @@ pub fn build_network( { let BuildNetworkParams { config, client, transaction_pool, spawn_handle, import_queue, on_demand, - block_announce_validator_builder, finality_proof_request_builder, finality_proof_provider, + block_announce_validator_builder, } = params; let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { @@ -896,8 +918,6 @@ pub fn build_network( }, network_config: config.network.clone(), chain: client.clone(), - finality_proof_provider, - finality_proof_request_builder, on_demand: on_demand, transaction_pool: transaction_pool_adapter as _, import_queue: Box::new(import_queue), diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 164976ecfe8717be2801b54e706d80acb5473f0a..cd01a5877758dad6e7dee6ec1fbc898a85377e46 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -137,7 +137,9 @@ where )?; let state = self.backend.state_at(*id)?; let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - let runtime_code = self.check_override(state_runtime_code.runtime_code()?, id)?; + let runtime_code = state_runtime_code.runtime_code() + .map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = self.check_override(runtime_code, id)?; let return_data = StateMachine::new( &state, @@ -211,7 +213,10 @@ where let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_state); // It is important to extract the runtime code here before we create the proof // recorder. 
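// Sketch of the error-mapping pattern used in the hunks above, with stand-in enums
// only: fetching the runtime code yields a state-machine level error which is now
// wrapped explicitly into the blockchain error type instead of relying on a
// blanket `From` conversion.
#[derive(Debug)]
enum StateError { RuntimeCodeMissing }

#[derive(Debug)]
enum BlockchainError { RuntimeCode(StateError) }

fn fetch_runtime_code() -> Result<Vec<u8>, StateError> {
	Err(StateError::RuntimeCodeMissing)
}

fn runtime_code_for_call() -> Result<Vec<u8>, BlockchainError> {
	// Mirrors `state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?`.
	fetch_runtime_code().map_err(BlockchainError::RuntimeCode)
}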
- let runtime_code = self.check_override(state_runtime_code.runtime_code()?, at)?; + + let runtime_code = state_runtime_code.runtime_code() + .map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = self.check_override(runtime_code, at)?; let backend = sp_state_machine::ProvingBackend::new_with_recorder( trie_state, @@ -236,7 +241,9 @@ where }, None => { let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - let runtime_code = self.check_override(state_runtime_code.runtime_code()?, at)?; + let runtime_code = state_runtime_code.runtime_code() + .map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = self.check_override(runtime_code, at)?; let mut state_machine = StateMachine::new( &state, @@ -273,7 +280,9 @@ where None, ); let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - self.executor.runtime_version(&mut ext, &state_runtime_code.runtime_code()?) + let runtime_code = state_runtime_code.runtime_code() + .map_err(sp_blockchain::Error::RuntimeCode)?; + self.executor.runtime_version(&mut ext, &runtime_code) .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) } @@ -284,6 +293,9 @@ where method: &str, call_data: &[u8] ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_state); + let runtime_code = state_runtime_code.runtime_code() + .map_err(sp_blockchain::Error::RuntimeCode)?; sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _, _>( trie_state, overlay, @@ -291,7 +303,7 @@ where self.spawn_handle.clone(), method, call_data, - &sp_state_machine::backend::BackendRuntimeCode::new(trie_state).runtime_code()?, + &runtime_code, ) .map_err(Into::into) } diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index d423fdee39b6cbebe368b222001e79131459c297..84174738b5608d8fbb833178f96c4daec02f27b8 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -297,7 +297,8 @@ impl Client where config: ClientConfig, ) -> sp_blockchain::Result { if backend.blockchain().header(BlockId::Number(Zero::zero()))?.is_none() { - let genesis_storage = build_genesis_storage.build_storage()?; + let genesis_storage = build_genesis_storage.build_storage() + .map_err(sp_blockchain::Error::Storage)?; let mut op = backend.begin_operation()?; backend.begin_state_operation(&mut op, BlockId::Hash(Default::default()))?; let state_root = op.reset_storage(genesis_storage)?; @@ -880,7 +881,7 @@ impl Client where &state, changes_trie_state.as_ref(), *parent_hash, - )?; + ).map_err(sp_blockchain::Error::Storage)?; if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root @@ -1159,12 +1160,12 @@ impl Client where /// Prepare in-memory header that is used in execution environment. fn prepare_environment_block(&self, parent: &BlockId) -> sp_blockchain::Result { - let parent_header = self.backend.blockchain().expect_header(*parent)?; + let parent_hash = self.backend.blockchain().expect_block_hash_from_id(parent)?; Ok(<::Header as HeaderT>::new( self.backend.blockchain().expect_block_number_from_id(parent)? 
+ One::one(), Default::default(), Default::default(), - parent_header.hash(), + parent_hash, Default::default(), )) } @@ -1900,8 +1901,7 @@ impl BlockBackend for Client self.body(id) } - fn block(&self, id: &BlockId) -> sp_blockchain::Result>> - { + fn block(&self, id: &BlockId) -> sp_blockchain::Result>> { Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) { (Some(header), Some(extrinsics), justification) => Some(SignedBlock { block: Block::new(header, extrinsics), justification }), @@ -1910,26 +1910,7 @@ impl BlockBackend for Client } fn block_status(&self, id: &BlockId) -> sp_blockchain::Result { - // this can probably be implemented more efficiently - if let BlockId::Hash(ref h) = id { - if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) { - return Ok(BlockStatus::Queued); - } - } - let hash_and_number = match id.clone() { - BlockId::Hash(hash) => self.backend.blockchain().number(hash)?.map(|n| (hash, n)), - BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), - }; - match hash_and_number { - Some((hash, number)) => { - if self.backend.have_state_at(&hash, number) { - Ok(BlockStatus::InChainWithState) - } else { - Ok(BlockStatus::InChainPruned) - } - } - None => Ok(BlockStatus::Unknown), - } + Client::block_status(self, id) } fn justification(&self, id: &BlockId) -> sp_blockchain::Result> { diff --git a/client/service/src/client/mod.rs b/client/service/src/client/mod.rs index b3aa2fa076af5bc91f44666e174e738c5e75947a..e4d1dc8bd8509f59c7575521d0c2511316dc29ba 100644 --- a/client/service/src/client/mod.rs +++ b/client/service/src/client/mod.rs @@ -23,22 +23,23 @@ //! //! - A database containing the blocks and chain state, generally referred to as //! the [`Backend`](sc_client_api::backend::Backend). -//! - A runtime environment, generally referred to as the [`Executor`](CallExecutor). +//! - A runtime environment, generally referred to as the +//! [`Executor`](sc_client_api::call_executor::CallExecutor). //! //! # Initialization //! //! Creating a [`Client`] is done by calling the `new` method and passing to it a -//! [`Backend`](sc_client_api::backend::Backend) and an [`Executor`](CallExecutor). +//! [`Backend`](sc_client_api::backend::Backend) and an +//! [`Executor`](sc_client_api::call_executor::CallExecutor). //! //! The former is typically provided by the `sc-client-db` crate. //! //! The latter typically requires passing one of: //! //! - A [`LocalCallExecutor`] running the runtime locally. -//! - A [`RemoteCallExecutor`](light::call_executor::RemoteCallRequest) that will ask a +//! - A [`RemoteCallExecutor`](sc_client_api::light::RemoteCallRequest) that will ask a //! third-party to perform the executions. -//! - A [`RemoteOrLocalCallExecutor`](light::call_executor::RemoteOrLocalCallExecutor), combination -//! of the two. +//! - A [`RemoteOrLocalCallExecutor`](sc_client_api::light::LocalOrRemote), combination of the two. //! //! Additionally, the fourth generic parameter of the `Client` is a marker type representing //! the ways in which the runtime can interface with the outside. Any code that builds a `Client` diff --git a/client/service/src/client/wasm_override.rs b/client/service/src/client/wasm_override.rs index 1025b9633887df63eb8f3f1682b5b24c0f246772..ba76f7a0fcf29ee6b1ae1ab1ba5924fdc08e1d0d 100644 --- a/client/service/src/client/wasm_override.rs +++ b/client/service/src/client/wasm_override.rs @@ -37,7 +37,8 @@ //! needed must be provided in the given directory. //! 
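// A minimal sketch of the override lookup described in the module docs above, with
// illustrative types: runtimes scraped from the given directory are keyed by their
// spec version and are only substituted when the on-chain spec version matches.
use std::collections::HashMap;

struct ScrapedWasm(Vec<u8>);

fn lookup_override(overrides: &HashMap<u32, ScrapedWasm>, onchain_spec_version: u32) -> Option<&[u8]> {
	overrides
		.get(&onchain_spec_version)
		.map(|blob| blob.0.as_slice())
}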
use std::{ - fs, collections::{HashMap, hash_map::DefaultHasher}, path::Path, + fs, collections::{HashMap, hash_map::DefaultHasher}, + path::{Path, PathBuf}, hash::Hasher as _, }; use sp_core::traits::FetchRuntimeCode; @@ -82,6 +83,29 @@ impl FetchRuntimeCode for WasmBlob { } } +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum WasmOverrideError { + #[error("Failed to get runtime version: {0}")] + VersionInvalid(String), + + #[error("WASM override IO error")] + Io(PathBuf, #[source] std::io::Error), + + #[error("Overwriting WASM requires a directory where local \ + WASM is stored. {} is not a directory", .0.display())] + NotADirectory(PathBuf), + + #[error("Duplicate WASM Runtimes found: \n{}\n", .0.join("\n") )] + DuplicateRuntime(Vec), +} + +impl From for sp_blockchain::Error { + fn from(err: WasmOverrideError) -> Self { + Self::Application(Box::new(err)) + } +} + /// Scrapes WASM from a folder and returns WASM from that folder /// if the runtime spec version matches. #[derive(Clone, Debug)] @@ -119,16 +143,13 @@ where /// Scrapes a folder for WASM runtimes. /// Returns a hashmap of the runtime version and wasm runtime code. fn scrape_overrides(dir: &Path, executor: &E) -> Result> { + let handle_err = |e: std::io::Error | -> sp_blockchain::Error { - sp_blockchain::Error::Msg(format!("{}", e.to_string())) + WasmOverrideError::Io(dir.to_owned(), e).into() }; if !dir.is_dir() { - return Err(sp_blockchain::Error::Msg(format!( - "Overwriting WASM requires a directory where \ - local WASM is stored. {:?} is not a directory", - dir, - ))); + return Err(WasmOverrideError::NotADirectory(dir.to_owned()).into()); } let mut overrides = HashMap::new(); @@ -149,9 +170,7 @@ where } if !duplicates.is_empty() { - let duplicate_file_list = duplicates.join("\n"); - let msg = format!("Duplicate WASM Runtimes found: \n{}\n", duplicate_file_list); - return Err(sp_blockchain::Error::Msg(msg)); + return Err(WasmOverrideError::DuplicateRuntime(duplicates).into()); } Ok(overrides) @@ -164,7 +183,7 @@ where ) -> Result { let mut ext = BasicExternalities::default(); executor.runtime_version(&mut ext, &code.runtime_code(heap_pages)) - .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) + .map_err(|e| WasmOverrideError::VersionInvalid(format!("{:?}", e)).into()) } } @@ -236,14 +255,10 @@ mod tests { let scraped = WasmOverride::scrape_overrides(dir, exec); match scraped { - Err(e) => { - match e { - sp_blockchain::Error::Msg(msg) => { - let is_match = msg - .matches("Duplicate WASM Runtimes found") - .map(ToString::to_string) - .collect::>(); - assert!(is_match.len() >= 1) + Err(sp_blockchain::Error::Application(e)) => { + match e.downcast_ref::() { + Some(WasmOverrideError::DuplicateRuntime(duplicates)) => { + assert_eq!(duplicates.len(), 1); }, _ => panic!("Test should end with Msg Error Variant") } diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 0caf05b2485db2409bd53288c1ce45fe983779b5..e360e610d490c5dab317eaaff6c281d714ddecb8 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -50,6 +50,8 @@ pub struct Configuration { pub network: NetworkConfiguration, /// Configuration for the keystore. pub keystore: KeystoreConfig, + /// Remote URI to connect to for async keystore support + pub keystore_remote: Option, /// Configuration for the database. 
pub database: DatabaseConfig, /// Size of internal state cache in Bytes @@ -103,6 +105,8 @@ pub struct Configuration { pub dev_key_seed: Option, /// Tracing targets pub tracing_targets: Option, + /// Is log filter reloading disabled + pub disable_log_reloading: bool, /// Tracing receiver pub tracing_receiver: sc_tracing::TracingReceiver, /// The size of the instances cache. diff --git a/client/service/src/error.rs b/client/service/src/error.rs index ffe1b39405501d224b7c56f40facb11776775f10..3515df78be876c32fbe1968e5613a9baab0f4465 100644 --- a/client/service/src/error.rs +++ b/client/service/src/error.rs @@ -27,25 +27,38 @@ use sp_blockchain; pub type Result = std::result::Result; /// Service errors. -#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +#[non_exhaustive] pub enum Error { - /// Client error. - Client(sp_blockchain::Error), - /// IO error. - Io(std::io::Error), - /// Consensus error. - Consensus(sp_consensus::Error), - /// Network error. - Network(sc_network::error::Error), - /// Keystore error. - Keystore(sc_keystore::Error), - /// Best chain selection strategy is missing. - #[display(fmt="Best chain selection strategy (SelectChain) is not provided.")] + #[error(transparent)] + Client(#[from] sp_blockchain::Error), + + #[error(transparent)] + Io(#[from] std::io::Error), + + #[error(transparent)] + Consensus(#[from] sp_consensus::Error), + + #[error(transparent)] + Network(#[from] sc_network::error::Error), + + #[error(transparent)] + Keystore(#[from] sc_keystore::Error), + + #[error("Best chain selection strategy (SelectChain) is not provided.")] SelectChainRequired, - /// Tasks executor is missing. - #[display(fmt="Tasks executor hasn't been provided.")] + + #[error("Tasks executor hasn't been provided.")] TaskExecutorRequired, - /// Other error. 
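// Sketch of what the `#[from]` attributes above provide, using stand-in types: any
// function returning the error enum can use `?` directly on a result whose error
// type has a `#[from]` variant, thanks to the generated `From` impl (this relies on
// the `thiserror` dependency this change adds to the crate).
#[derive(Debug, thiserror::Error)]
enum SketchError {
	#[error(transparent)]
	Io(#[from] std::io::Error),

	#[error("Other: {0}")]
	Other(String),
}

fn read_chain_spec(path: &std::path::Path) -> Result<Vec<u8>, SketchError> {
	// The `std::io::Error` from `fs::read` is converted into `SketchError::Io` by `?`.
	Ok(std::fs::read(path)?)
}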
+ + #[error("Prometheus metrics error")] + Prometheus(#[from] prometheus_endpoint::PrometheusError), + + #[error("Application")] + Application(#[from] Box), + + #[error("Other: {0}")] Other(String), } @@ -55,21 +68,8 @@ impl<'a> From<&'a str> for Error { } } -impl From for Error { - fn from(e: prometheus_endpoint::PrometheusError) -> Self { - Error::Other(format!("Prometheus error: {}", e)) - } -} - -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Client(ref err) => Some(err), - Error::Io(ref err) => Some(err), - Error::Consensus(ref err) => Some(err), - Error::Network(ref err) => Some(err), - Error::Keystore(ref err) => Some(err), - _ => None, - } +impl<'a> From for Error { + fn from(s: String) -> Self { + Error::Other(s) } } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index a23ebf3d553d58573e8b960bc006f3cdd1019bcd..cd129de3260789774b2b37fa926ae42ec3f45108 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -73,7 +73,7 @@ pub use sc_executor::NativeExecutionDispatch; pub use std::{ops::Deref, result::Result, sync::Arc}; #[doc(hidden)] pub use sc_network::config::{ - FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder, TransactionImport, + OnDemand, TransactionImport, TransactionImportFuture, }; pub use sc_tracing::TracingReceiver; @@ -250,8 +250,8 @@ async fn build_network_future< network.service().announce_block(notification.hash, Vec::new()); } - if let sp_consensus::BlockOrigin::Own = notification.origin { - network.service().own_block_imported( + if notification.is_new_best { + network.service().new_best_block_imported( notification.hash, notification.header.number().clone(), ); @@ -401,7 +401,7 @@ fn start_rpc_servers< >( config: &Configuration, mut gen_handler: H, - rpc_metrics: Option<&sc_rpc_server::RpcMetrics> + rpc_metrics: sc_rpc_server::RpcMetrics, ) -> Result, error::Error> { fn maybe_start_server(address: Option, mut start: F) -> Result, io::Error> where F: FnMut(&SocketAddr) -> Result, @@ -434,7 +434,7 @@ fn start_rpc_servers< config.rpc_ipc.as_ref().map(|path| sc_rpc_server::start_ipc( &*path, gen_handler( sc_rpc::DenyUnsafe::No, - sc_rpc_server::RpcMiddleware::new(rpc_metrics.cloned(), "ipc") + sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "ipc") ) )), maybe_start_server( @@ -444,7 +444,7 @@ fn start_rpc_servers< config.rpc_cors.as_ref(), gen_handler( deny_unsafe(&address, &config.rpc_methods), - sc_rpc_server::RpcMiddleware::new(rpc_metrics.cloned(), "http") + sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "http") ), ), )?.map(|s| waiting::HttpServer(Some(s))), @@ -456,7 +456,7 @@ fn start_rpc_servers< config.rpc_cors.as_ref(), gen_handler( deny_unsafe(&address, &config.rpc_methods), - sc_rpc_server::RpcMiddleware::new(rpc_metrics.cloned(), "ws") + sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "ws") ), ), )?.map(|s| waiting::WsServer(Some(s))), @@ -471,7 +471,7 @@ fn start_rpc_servers< >( _: &Configuration, _: H, - _: Option<&sc_rpc_server::RpcMetrics> + _: sc_rpc_server::RpcMetrics, ) -> Result, error::Error> { Ok(Box::new(())) } diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 8a9f0ace171d88302a34509d2edcb159e649f83e..1f200b4cbeed6dca3dd096663e4ed08ee647767f 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -239,6 +239,7 @@ fn node_config( make_block_and_import(&first_service, first_user_data); } - 
network.full_nodes[0].1.network().update_chain(); + let info = network.full_nodes[0].1.client().info(); + network.full_nodes[0].1.network().new_best_block_imported(info.best_hash, info.best_number); network.full_nodes[0].3.clone() }; diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 4d3e736d9539e75c41cee79e20fd190c66d4283e..18facd720db250b7540723bd823198fd32ed78ab 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -13,8 +13,9 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +thiserror = "1.0.21" parking_lot = "0.10.0" -log = "0.4.8" +log = "0.4.11" sc-client-api = { version = "2.0.0", path = "../api" } sp-core = { version = "2.0.0", path = "../../primitives/core" } codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } diff --git a/client/sync-state-rpc/Cargo.toml b/client/sync-state-rpc/Cargo.toml index 8da372db94ffc5d8f87cb9e97161efe1a631912d..81204365d0821479e580e7dd1b9abcd8ee29a2e8 100644 --- a/client/sync-state-rpc/Cargo.toml +++ b/client/sync-state-rpc/Cargo.toml @@ -13,6 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +thiserror = "1.0.21" jsonrpc-core = "15.0" jsonrpc-core-client = "15.0" jsonrpc-derive = "15.0" diff --git a/client/sync-state-rpc/src/lib.rs b/client/sync-state-rpc/src/lib.rs index fa433e5e31d2da8844aa4f01e95b93922c6fc47d..573610fb2f6102de728b377a742d864c297b3faa 100644 --- a/client/sync-state-rpc/src/lib.rs +++ b/client/sync-state-rpc/src/lib.rs @@ -17,6 +17,8 @@ //! A RPC handler to create sync states for light clients. //! Currently only usable with BABE + GRANDPA. +#![deny(unused_crate_dependencies)] + use sp_runtime::traits::{Block as BlockT, NumberFor}; use sp_blockchain::HeaderBackend; use std::sync::Arc; @@ -28,12 +30,27 @@ type SharedAuthoritySet = sc_finality_grandpa::SharedAuthoritySet<::Hash, NumberFor>; type SharedEpochChanges = sc_consensus_epochs::SharedEpochChanges; -struct Error(sp_blockchain::Error); +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +enum Error { + #[error(transparent)] + Blockchain(#[from] sp_blockchain::Error), + + #[error("Failed to load the block weight for block {0:?}")] + LoadingBlockWeightFailed(::Hash), + + #[error("JsonRpc error: {0}")] + JsonRpc(String), +} -impl From for jsonrpc_core::Error { - fn from(error: Error) -> Self { +impl From> for jsonrpc_core::Error { + fn from(error: Error) -> Self { + let message = match error { + Error::JsonRpc(s) => s, + _ => error.to_string(), + }; jsonrpc_core::Error { - message: error.0.to_string(), + message, code: jsonrpc_core::ErrorCode::ServerError(1), data: None, } @@ -76,20 +93,16 @@ impl SyncStateRpcHandler } } - fn build_sync_state(&self) -> Result, sp_blockchain::Error> { + fn build_sync_state(&self) -> Result, Error> { let finalized_hash = self.client.info().finalized_hash; let finalized_header = self.client.header(BlockId::Hash(finalized_hash))? - .ok_or_else(|| sp_blockchain::Error::Msg( - format!("Failed to get the header for block {:?}", finalized_hash) - ))?; + .ok_or_else(|| sp_blockchain::Error::MissingHeader(finalized_hash.to_string()))?; let finalized_block_weight = sc_consensus_babe::aux_schema::load_block_weight( - &*self.client, - finalized_hash, - )? - .ok_or_else(|| sp_blockchain::Error::Msg( - format!("Failed to load the block weight for block {:?}", finalized_hash) - ))?; + &*self.client, + finalized_hash, + )? 
+ .ok_or_else(|| Error::LoadingBlockWeightFailed(finalized_hash))?; Ok(sc_chain_spec::LightSyncState { finalized_block_header: finalized_header, @@ -114,15 +127,16 @@ impl SyncStateRpcApi for SyncStateRpcHandler let mut chain_spec = self.chain_spec.cloned_box(); - let sync_state = self.build_sync_state().map_err(Error)?; + let sync_state = self.build_sync_state() + .map_err(map_error::>)?; chain_spec.set_light_sync_state(sync_state.to_serializable()); - let string = chain_spec.as_json(raw).map_err(map_error)?; + let string = chain_spec.as_json(raw).map_err(map_error::)?; - serde_json::from_str(&string).map_err(|err| map_error(err.to_string())) + serde_json::from_str(&string).map_err(|err| map_error::(err)) } } -fn map_error(error: String) -> jsonrpc_core::Error { - Error(sp_blockchain::Error::Msg(error)).into() +fn map_error(error: S) -> jsonrpc_core::Error { + Error::::JsonRpc(error.to_string()).into() } diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index bff7842bec4f54b9a5b9cb8fa1e4912276280d35..98ed63886615b8abb8ca1638fd35f26d2506e9ac 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -18,8 +18,8 @@ targets = ["x86_64-unknown-linux-gnu"] parking_lot = "0.10.0" futures = "0.3.4" futures-timer = "3.0.1" -wasm-timer = "0.2.0" -libp2p = { version = "0.30.1", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } +wasm-timer = "0.2.5" +libp2p = { version = "0.32.2", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } log = "0.4.8" pin-project = "0.4.6" rand = "0.7.2" diff --git a/client/telemetry/src/async_record.rs b/client/telemetry/src/async_record.rs index 34b7c1435afa1a66991f912bb1b2bbb4244b8936..06650a54defd45ae8c4d651c0089444cd5a275ae 100644 --- a/client/telemetry/src/async_record.rs +++ b/client/telemetry/src/async_record.rs @@ -1,6 +1,6 @@ //! # Internal types to ssync drain slog //! FIXME: REMOVE THIS ONCE THE PR WAS MERGE -//! https://github.com/slog-rs/async/pull/14 +//! use slog::{Record, RecordStatic, Level, SingleKV, KV, BorrowedKV}; use slog::{Serializer, OwnedKVList, Key}; diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index 35db326c9492986521f284f4083ae79c2553b231..28eeab0bdf713f377455ccdb15f7b58ddc10bbdc 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -13,15 +13,20 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +ansi_term = "0.12.1" +tracing-log = "0.1.1" erased-serde = "0.3.9" +lazy_static = "1.4.0" log = { version = "0.4.8" } +once_cell = "1.4.1" parking_lot = "0.10.0" +regex = "1.4.2" rustc-hash = "1.1.0" serde = "1.0.101" serde_json = "1.0.41" slog = { version = "2.5.2", features = ["nested-values"] } -tracing = "0.1.21" +tracing = "0.1.22" tracing-core = "0.1.17" -tracing-subscriber = "0.2.13" +tracing-subscriber = "0.2.15" sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } sc-telemetry = { version = "2.0.0", path = "../telemetry" } diff --git a/client/tracing/src/lib.rs b/client/tracing/src/lib.rs index 6690f283464ea4674c91500f166e97d42b2025b8..f4017023eff19d3d68389554781d4644e39f0948 100644 --- a/client/tracing/src/lib.rs +++ b/client/tracing/src/lib.rs @@ -24,6 +24,8 @@ //! //! 
Currently we provide `Log` (default), `Telemetry` variants for `Receiver` +pub mod logging; + use rustc_hash::FxHashMap; use std::fmt; use std::time::{Duration, Instant}; @@ -37,12 +39,109 @@ use tracing::{ span::{Attributes, Id, Record}, subscriber::Subscriber, }; -use tracing_subscriber::{CurrentSpan, layer::{Layer, Context}}; +use tracing_subscriber::{ + fmt::time::ChronoLocal, + CurrentSpan, + EnvFilter, + layer::{self, Layer, Context}, + fmt as tracing_fmt, + Registry, +}; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_tracing::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; +use tracing_subscriber::reload::Handle; +use once_cell::sync::OnceCell; +use tracing_subscriber::filter::Directive; + const ZERO_DURATION: Duration = Duration::from_nanos(0); +// The layered Subscriber as built up in `init_logger()`. +// Used in the reload `Handle`. +type SCSubscriber< + N = tracing_fmt::format::DefaultFields, + E = logging::EventFormat, + W = fn() -> std::io::Stderr +> = layer::Layered, Registry>; + +// Handle to reload the tracing log filter +static FILTER_RELOAD_HANDLE: OnceCell> = OnceCell::new(); +// Directives that are defaulted to when resetting the log filter +static DEFAULT_DIRECTIVES: OnceCell>> = OnceCell::new(); +// Current state of log filter +static CURRENT_DIRECTIVES: OnceCell>> = OnceCell::new(); + +/// Initialize FILTER_RELOAD_HANDLE, only possible once +pub fn set_reload_handle(handle: Handle) { + let _ = FILTER_RELOAD_HANDLE.set(handle); +} + +/// Add log filter directive(s) to the defaults +/// +/// The syntax is identical to the CLI `=`: +/// +/// `sync=debug,state=trace` +pub fn add_default_directives(directives: &str) { + DEFAULT_DIRECTIVES.get_or_init(|| Mutex::new(Vec::new())).lock().push(directives.to_owned()); + add_directives(directives); +} + +/// Add directives to current directives +pub fn add_directives(directives: &str) { + CURRENT_DIRECTIVES.get_or_init(|| Mutex::new(Vec::new())).lock().push(directives.to_owned()); +} + +/// Reload the logging filter with the supplied directives added to the existing directives +pub fn reload_filter() -> Result<(), String> { + let mut env_filter = EnvFilter::default(); + if let Some(current_directives) = CURRENT_DIRECTIVES.get() { + // Use join and then split in case any directives added together + for directive in current_directives.lock().join(",").split(',').map(|d| d.parse()) { + match directive { + Ok(dir) => env_filter = env_filter.add_directive(dir), + Err(invalid_directive) => { + log::warn!( + target: "tracing", + "Unable to parse directive while setting log filter: {:?}", + invalid_directive, + ); + } + } + } + } + env_filter = env_filter.add_directive( + "sc_tracing=trace" + .parse() + .expect("provided directive is valid"), + ); + log::debug!(target: "tracing", "Reloading log filter with: {}", env_filter); + FILTER_RELOAD_HANDLE.get() + .ok_or("No reload handle present".to_string())? + .reload(env_filter) + .map_err(|e| format!("{}", e)) +} + +/// Resets the log filter back to the original state when the node was started. +/// +/// Includes substrate defaults and CLI supplied directives. +pub fn reset_log_filter() -> Result<(), String> { + *CURRENT_DIRECTIVES + .get_or_init(|| Mutex::new(Vec::new())).lock() = + DEFAULT_DIRECTIVES.get_or_init(|| Mutex::new(Vec::new())).lock().clone(); + reload_filter() +} + +/// Parse `Directive` and add to default directives if successful. +/// +/// Ensures the supplied directive will be restored when resetting the log filter. 
+pub fn parse_default_directive(directive: &str) -> Result { + let dir = directive + .parse() + .map_err(|_| format!("Unable to parse directive: {}", directive))?; + add_default_directives(directive); + Ok(dir) +} + /// Responsible for assigning ids to new spans, which are not re-used. pub struct ProfilingLayer { targets: Vec<(String, Level)>, @@ -231,15 +330,13 @@ impl ProfilingLayer { /// either with a level, eg: "pallet=trace" /// or without: "pallet" in which case the level defaults to `trace`. /// wasm_tracing indicates whether to enable wasm traces - pub fn new_with_handler(trace_handler: Box, targets: &str) - -> Self - { + pub fn new_with_handler(trace_handler: Box, targets: &str) -> Self { let targets: Vec<_> = targets.split(',').map(|s| parse_target(s)).collect(); Self { targets, trace_handler, span_data: Mutex::new(FxHashMap::default()), - current_span: Default::default() + current_span: Default::default(), } } @@ -461,7 +558,7 @@ mod tests { }; let layer = ProfilingLayer::new_with_handler( Box::new(handler), - "test_target" + "test_target", ); let subscriber = tracing_subscriber::fmt().finish().with(layer); (subscriber, spans, events) diff --git a/client/cli/src/logging.rs b/client/tracing/src/logging.rs similarity index 97% rename from client/cli/src/logging.rs rename to client/tracing/src/logging.rs index ffb4c3dfaafa17e2261d0537ffd81610f8391fc1..370b09f781b4ea92ee053bc5caf5392560a08b07 100644 --- a/client/cli/src/logging.rs +++ b/client/tracing/src/logging.rs @@ -79,12 +79,12 @@ impl<'a> MaybeColorWriter<'a> { } } -pub(crate) struct EventFormat { - pub(crate) timer: T, - pub(crate) display_target: bool, - pub(crate) display_level: bool, - pub(crate) display_thread_name: bool, - pub(crate) enable_color: bool, +pub struct EventFormat { + pub timer: T, + pub display_target: bool, + pub display_level: bool, + pub display_thread_name: bool, + pub enable_color: bool, } // NOTE: the following code took inspiration from tracing-subscriber @@ -147,7 +147,7 @@ where } } -pub(crate) struct NodeNameLayer; +pub struct NodeNameLayer; impl Layer for NodeNameLayer where diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 5db37f536838727fb5c2ac7ccab785e5ec6be365..a4d7bc685c99b44fdcbe745aea3b2ac28478235f 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "1.3.4" } -derive_more = "0.99.2" +thiserror = "1.0.21" futures = { version = "0.3.1", features = ["compat"] } futures-diagnose = "1.0" intervalier = "0.4.0" diff --git a/client/transaction-pool/graph/Cargo.toml b/client/transaction-pool/graph/Cargo.toml index c5850e765fcfa88d59db43af4d37bc6d30c27158..94c80c6f298a20628350c612c3cb9cc6965252ee 100644 --- a/client/transaction-pool/graph/Cargo.toml +++ b/client/transaction-pool/graph/Cargo.toml @@ -14,6 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" +thiserror = "1.0.21" futures = "0.3.4" log = "0.4.8" parking_lot = "0.10.0" diff --git a/client/transaction-pool/graph/src/error.rs b/client/transaction-pool/graph/src/error.rs deleted file mode 100644 index 392ddaa39be6f2d809792985b283cf5630d09792..0000000000000000000000000000000000000000 --- a/client/transaction-pool/graph/src/error.rs +++ /dev/null @@ -1,81 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. 
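
Editor's note: the filter-reload helpers added to `client/tracing` above (`add_directives`, `reload_filter`, `reset_log_filter`) are meant to be driven at runtime. The sketch below is not part of the diff; the `sc_tracing::` paths are an assumption about how the crate exposes these free functions, and the function names here are illustrative.

// Hypothetical usage of the filter-reload helpers introduced above; the
// `sc_tracing::` paths are an assumption, not taken from the diff.
fn enable_verbose_sync_logs() -> Result<(), String> {
    // Directives use the same `target=level` syntax as the CLI, e.g. "sync=debug,state=trace".
    sc_tracing::add_directives("sync=trace");
    // Rebuilds the EnvFilter from every directive added so far and swaps it in
    // through the reload handle registered during logger initialisation.
    sc_tracing::reload_filter()
}

fn restore_default_logs() -> Result<(), String> {
    // Drops runtime-only directives, restoring the defaults (built-ins plus CLI-supplied).
    sc_tracing::reset_log_filter()
}
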
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Transaction pool errors. - -use sp_runtime::transaction_validity::{ - TransactionPriority as Priority, InvalidTransaction, UnknownTransaction, -}; - -/// Transaction pool result. -pub type Result = std::result::Result; - -/// Transaction pool error type. -#[derive(Debug, derive_more::Display, derive_more::From)] -pub enum Error { - /// Transaction is not verifiable yet, but might be in the future. - #[display(fmt="Unknown transaction validity: {:?}", _0)] - UnknownTransaction(UnknownTransaction), - /// Transaction is invalid. - #[display(fmt="Invalid transaction validity: {:?}", _0)] - InvalidTransaction(InvalidTransaction), - /// The transaction validity returned no "provides" tag. - /// - /// Such transactions are not accepted to the pool, since we use those tags - /// to define identity of transactions (occupance of the same "slot"). - #[display(fmt="The transaction does not provide any tags, so the pool can't identify it.")] - NoTagsProvided, - /// The transaction is temporarily banned. - #[display(fmt="Temporarily Banned")] - TemporarilyBanned, - /// The transaction is already in the pool. - #[display(fmt="[{:?}] Already imported", _0)] - AlreadyImported(Box), - /// The transaction cannot be imported cause it's a replacement and has too low priority. - #[display(fmt="Too low priority ({} > {})", old, new)] - TooLowPriority { - /// Transaction already in the pool. - old: Priority, - /// Transaction entering the pool. - new: Priority - }, - /// Deps cycle detected and we couldn't import transaction. - #[display(fmt="Cycle Detected")] - CycleDetected, - /// Transaction was dropped immediately after it got inserted. - #[display(fmt="Transaction couldn't enter the pool because of the limit.")] - ImmediatelyDropped, - /// Invalid block id. - InvalidBlockId(String), -} - -impl std::error::Error for Error {} - -/// Transaction pool error conversion. -pub trait IntoPoolError: ::std::error::Error + Send + Sized { - /// Try to extract original `Error` - /// - /// This implementation is optional and used only to - /// provide more descriptive error messages for end users - /// of RPC API. - fn into_pool_error(self) -> ::std::result::Result { Err(self) } -} - -impl IntoPoolError for Error { - fn into_pool_error(self) -> ::std::result::Result { Ok(self) } -} diff --git a/client/transaction-pool/src/error.rs b/client/transaction-pool/src/error.rs index c0f795df1801a0c59313aac28346bb6797ff04b7..49fc433e320cc0be9fa3f3d905e77bc1c0369e83 100644 --- a/client/transaction-pool/src/error.rs +++ b/client/transaction-pool/src/error.rs @@ -24,30 +24,22 @@ use sp_transaction_pool::error::Error as TxPoolError; pub type Result = std::result::Result; /// Transaction pool error type. 
-#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] pub enum Error { - /// Pool error. - Pool(TxPoolError), - /// Blockchain error. - Blockchain(sp_blockchain::Error), - /// Error while converting a `BlockId`. - #[from(ignore)] + #[error("Transaction pool error")] + Pool(#[from] TxPoolError), + + #[error("Blockchain error")] + Blockchain(#[from] sp_blockchain::Error), + + #[error("Block conversion error: {0}")] BlockIdConversion(String), - /// Error while calling the runtime api. - #[from(ignore)] + + #[error("Runtime error: {0}")] RuntimeApi(String), } -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Pool(ref err) => Some(err), - Error::Blockchain(ref err) => Some(err), - Error::BlockIdConversion(_) => None, - Error::RuntimeApi(_) => None, - } - } -} impl sp_transaction_pool::error::IntoPoolError for Error { fn into_pool_error(self) -> std::result::Result { diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index b195d5c65706556b493ad0c7b1732a5759f47d02..a3837e16778652c8ef7e1bf30ea6ccf633c927a4 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -45,9 +45,6 @@ # Contracts /frame/contracts/ @athei -# EVM -/frame/evm/ @sorpaas - # NPoS and election /frame/staking/ @kianenigma /frame/elections/ @kianenigma diff --git a/docs/README.adoc b/docs/README.adoc index 7f3d50faac7d61b471637050f14df1911f5dc207..71052420b1aa9979b7fe907a7a9d2f5b72f34051 100644 --- a/docs/README.adoc +++ b/docs/README.adoc @@ -308,28 +308,6 @@ cargo run --release \-- \ Additional Substrate CLI usage options are available and may be shown by running `cargo run \-- --help`. -=== WASM binaries - -The WASM binaries are built during the normal `cargo build` process. To control the WASM binary building, -we support multiple environment variables: - -* `SKIP_WASM_BUILD` - Skips building any WASM binary. This is useful when only native should be recompiled. -* `BUILD_DUMMY_WASM_BINARY` - Builds dummy WASM binaries. These dummy binaries are empty and useful - for `cargo check` runs. -* `WASM_BUILD_TYPE` - Sets the build type for building WASM binaries. Supported values are `release` or `debug`. - By default the build type is equal to the build type used by the main build. -* `FORCE_WASM_BUILD` - Can be set to force a WASM build. On subsequent calls the value of the variable - needs to change. As WASM builder instructs `cargo` to watch for file changes - this environment variable should only be required in certain circumstances. -* `WASM_TARGET_DIRECTORY` - Will copy release build WASM binary to the given directory. The path needs - to be absolute. -* `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm binary. -* `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. - -Each project can be skipped individually by using the environment variable `SKIP_PROJECT_NAME_WASM_BUILD`. -Where `PROJECT_NAME` needs to be replaced by the name of the cargo project, e.g. `node-runtime` will -be `NODE_RUNTIME`. 
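
Editor's note: both the removed graph error module and the reworked `client/transaction-pool` error above keep the `IntoPoolError` conversion so callers can recover the typed pool error. The sketch below is not from the diff; the function name and message wording are illustrative, and it only relies on the trait shape quoted above (`into_pool_error(self) -> Result<Error, Self>` with an `std::error::Error + Send` bound).

use sp_transaction_pool::error::IntoPoolError;

fn describe_submit_failure<E: IntoPoolError>(err: E) -> String {
    match err.into_pool_error() {
        // The typed pool error carries a user-facing reason
        // (temporarily banned, priority too low, already imported, ...).
        Ok(pool_err) => format!("transaction rejected: {}", pool_err),
        // Anything that is not a pool error is surfaced as an internal error.
        Err(other) => format!("internal error: {}", other),
    }
}
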
- [[flaming-fir]] === Joining the Flaming Fir Testnet diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index d1742e567cfac2d56c3a5bf02763dd347afb13f9..380b561dba40796a7e697b82443254493c40a484 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -15,24 +15,34 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } # Needed for various traits. In our case, `OnFinalize`. sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } # Needed for type-safe access to storage DB. frame-support = { version = "2.0.0", default-features = false, path = "../support" } # `system` module provides us with all sorts of useful stuff and macros depend on it being around. frame-system = { version = "2.0.0", default-features = false, path = "../system" } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] sp-core = { version = "2.0.0", path = "../../primitives/core" } sp-std = { version = "2.0.0", path = "../../primitives/std" } sp-io = { version = "2.0.0", path = "../../primitives/io" } +pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } [features] default = ["std"] std = [ "serde", "codec/std", + "sp-std/std", "sp-runtime/std", "frame-support/std", "frame-system/std", + "frame-benchmarking/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "sp-runtime/runtime-benchmarks", + "frame-system/runtime-benchmarks", ] diff --git a/frame/assets/README.md b/frame/assets/README.md index 6b3fe21e52775b05a8e63f49c6ecd4e33b6a77f5..44c4eedc31be781f677e4d6847f13534df730103 100644 --- a/frame/assets/README.md +++ b/frame/assets/README.md @@ -11,9 +11,9 @@ with a fixed supply, including: * Asset Transfer * Asset Destruction -To use it in your runtime, you need to implement the assets [`Trait`](https://docs.rs/pallet-assets/latest/pallet_assets/trait.Trait.html). +To use it in your runtime, you need to implement the assets [`assets::Trait`](https://docs.rs/pallet-assets/latest/pallet_assets/trait.Trait.html). -The supported dispatchable functions are documented in the [`Call`](https://docs.rs/pallet-assets/latest/pallet_assets/enum.Call.html) enum. +The supported dispatchable functions are documented in the [`assets::Call`](https://docs.rs/pallet-assets/latest/pallet_assets/enum.Call.html) enum. ### Terminology @@ -72,10 +72,10 @@ use pallet_assets as assets; use frame_support::{decl_module, dispatch, ensure}; use frame_system::ensure_signed; -pub trait Trait: assets::Trait { } +pub trait Config: assets::Config { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { pub fn issue_token_airdrop(origin) -> dispatch::DispatchResult { let sender = ensure_signed(origin).map_err(|e| e.as_str())?; @@ -106,11 +106,11 @@ Below are assumptions that must be held when using this module. If any of them are violated, the behavior of this module is undefined. * The total count of assets should be less than - `Trait::AssetId::max_value()`. + `Config::AssetId::max_value()`. 
## Related Modules * [`System`](https://docs.rs/frame-system/latest/frame_system/) * [`Support`](https://docs.rs/frame-support/latest/frame_support/) -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..cecb2ccae58b404bf2b0fa386af2d2d93fad53aa --- /dev/null +++ b/frame/assets/src/benchmarking.rs @@ -0,0 +1,298 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Assets pallet benchmarking. + +use super::*; +use sp_std::prelude::*; +use sp_runtime::traits::Bounded; +use frame_system::RawOrigin as SystemOrigin; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; + +use crate::Module as Assets; + +const SEED: u32 = 0; + +fn create_default_asset(max_zombies: u32) + -> (T::AccountId, ::Source) +{ + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + let root = SystemOrigin::Root.into(); + assert!(Assets::::force_create( + root, + Default::default(), + caller_lookup.clone(), + max_zombies, + 1u32.into(), + ).is_ok()); + (caller, caller_lookup) +} + +fn create_default_minted_asset(max_zombies: u32, amount: T::Balance) + -> (T::AccountId, ::Source) +{ + let (caller, caller_lookup) = create_default_asset::(max_zombies); + assert!(Assets::::mint( + SystemOrigin::Signed(caller.clone()).into(), + Default::default(), + caller_lookup.clone(), + amount, + ).is_ok()); + (caller, caller_lookup) +} + +fn add_zombies(minter: T::AccountId, n: u32) { + let origin = SystemOrigin::Signed(minter); + for i in 0..n { + let target = account("zombie", i, SEED); + let target_lookup = T::Lookup::unlookup(target); + assert!(Assets::::mint(origin.clone().into(), Default::default(), target_lookup, 100u32.into()).is_ok()); + } +} + +fn assert_last_event(generic_event: ::Event) { + let events = frame_system::Module::::events(); + let system_event: ::Event = generic_event.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +benchmarks! 
{ + _ { } + + create { + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, 1, 1u32.into()) + verify { + assert_last_event::(RawEvent::Created(Default::default(), caller.clone(), caller).into()); + } + + force_create { + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + }: _(SystemOrigin::Root, Default::default(), caller_lookup, 1, 1u32.into()) + verify { + assert_last_event::(RawEvent::ForceCreated(Default::default(), caller).into()); + } + + destroy { + let z in 0 .. 10_000; + let (caller, _) = create_default_asset::(10_000); + add_zombies::(caller.clone(), z); + }: _(SystemOrigin::Signed(caller), Default::default(), 10_000) + verify { + assert_last_event::(RawEvent::Destroyed(Default::default()).into()); + } + + force_destroy { + let z in 0 .. 10_000; + let (caller, _) = create_default_asset::(10_000); + add_zombies::(caller.clone(), z); + }: _(SystemOrigin::Root, Default::default(), 10_000) + verify { + assert_last_event::(RawEvent::Destroyed(Default::default()).into()); + } + + mint { + let (caller, caller_lookup) = create_default_asset::(10); + let amount = T::Balance::from(100u32); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, amount) + verify { + assert_last_event::(RawEvent::Issued(Default::default(), caller, amount).into()); + } + + burn { + let amount = T::Balance::from(100u32); + let (caller, caller_lookup) = create_default_minted_asset::(10, amount); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, amount) + verify { + assert_last_event::(RawEvent::Burned(Default::default(), caller, amount).into()); + } + + transfer { + let amount = T::Balance::from(100u32); + let (caller, caller_lookup) = create_default_minted_asset::(10, amount); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), target_lookup, amount) + verify { + assert_last_event::(RawEvent::Transferred(Default::default(), caller, target, amount).into()); + } + + force_transfer { + let amount = T::Balance::from(100u32); + let (caller, caller_lookup) = create_default_minted_asset::(10, amount); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, target_lookup, amount) + verify { + assert_last_event::(RawEvent::ForceTransferred(Default::default(), caller, target, amount).into()); + } + + freeze { + let (caller, caller_lookup) = create_default_minted_asset::(10, 100u32.into()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup) + verify { + assert_last_event::(RawEvent::Frozen(Default::default(), caller).into()); + } + + thaw { + let (caller, caller_lookup) = create_default_minted_asset::(10, 100u32.into()); + assert!(Assets::::freeze( + SystemOrigin::Signed(caller.clone()).into(), + Default::default(), + caller_lookup.clone() + ).is_ok()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup) + verify { + assert_last_event::(RawEvent::Thawed(Default::default(), caller).into()); + } + + transfer_ownership { + let (caller, _) = create_default_asset::(10); + let target: 
T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + }: _(SystemOrigin::Signed(caller), Default::default(), target_lookup) + verify { + assert_last_event::(RawEvent::OwnerChanged(Default::default(), target).into()); + } + + set_team { + let (caller, _) = create_default_asset::(10); + let target0 = T::Lookup::unlookup(account("target", 0, SEED)); + let target1 = T::Lookup::unlookup(account("target", 1, SEED)); + let target2 = T::Lookup::unlookup(account("target", 2, SEED)); + }: _(SystemOrigin::Signed(caller), Default::default(), target0.clone(), target1.clone(), target2.clone()) + verify { + assert_last_event::(RawEvent::TeamChanged( + Default::default(), + account("target", 0, SEED), + account("target", 1, SEED), + account("target", 2, SEED), + ).into()); + } + + set_max_zombies { + let (caller, _) = create_default_asset::(10); + let max_zombies: u32 = 100; + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + }: _(SystemOrigin::Signed(caller), Default::default(), max_zombies) + verify { + assert_last_event::(RawEvent::MaxZombiesChanged(Default::default(), max_zombies).into()); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{new_test_ext, Test}; + + #[test] + fn create() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_create::().is_ok()); + }); + } + + #[test] + fn force_create() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_force_create::().is_ok()); + }); + } + + #[test] + fn destroy() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_destroy::().is_ok()); + }); + } + + #[test] + fn force_destroy() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_force_destroy::().is_ok()); + }); + } + + #[test] + fn mint() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_mint::().is_ok()); + }); + } + + #[test] + fn burn() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_burn::().is_ok()); + }); + } + + #[test] + fn transfer() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_transfer::().is_ok()); + }); + } + + #[test] + fn force_transfer() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_force_transfer::().is_ok()); + }); + } + + #[test] + fn freeze() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_freeze::().is_ok()); + }); + } + + #[test] + fn thaw() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_thaw::().is_ok()); + }); + } + + #[test] + fn transfer_ownership() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_transfer_ownership::().is_ok()); + }); + } + + #[test] + fn set_team() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_set_team::().is_ok()); + }); + } + + #[test] + fn set_max_zombies() { + new_test_ext().execute_with(|| { + assert!(test_benchmark_set_max_zombies::().is_ok()); + }); + } +} diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index e5ad2ae352eb8b73bd4322e922e4e4c63e470b5e..df1cb87f75b2dee48f97cfac30dcc82581a91221 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -24,41 +24,72 @@ //! The Assets module provides functionality for asset management of fungible asset classes //! with a fixed supply, including: //! -//! * Asset Issuance -//! * Asset Transfer -//! * Asset Destruction +//! * Asset Issuance (Minting) +//! * Asset Transferal +//! * Asset Freezing +//! * Asset Destruction (Burning) //! -//! To use it in your runtime, you need to implement the assets [`Trait`](./trait.Trait.html). +//! 
To use it in your runtime, you need to implement the assets [`Config`](./trait.Config.html). //! //! The supported dispatchable functions are documented in the [`Call`](./enum.Call.html) enum. //! //! ### Terminology //! -//! * **Asset issuance:** The creation of a new asset, whose total supply will belong to the -//! account that issues the asset. -//! * **Asset transfer:** The action of transferring assets from one account to another. -//! * **Asset destruction:** The process of an account removing its entire holding of an asset. -//! * **Fungible asset:** An asset whose units are interchangeable. -//! * **Non-fungible asset:** An asset for which each unit has unique characteristics. +//! * **Admin**: An account ID uniquely privileged to be able to unfreeze (thaw) an account and it's +//! assets, as well as forcibly transfer a particular class of assets between arbitrary accounts +//! and reduce the balance of a particular class of assets of arbitrary accounts. +//! * **Asset issuance/minting**: The creation of a new asset, whose total supply will belong to the +//! account that issues the asset. This is a privileged operation. +//! * **Asset transfer**: The reduction of the balance of an asset of one account with the +//! corresponding increase in the balance of another. +//! * **Asset destruction**: The process of reduce the balance of an asset of one account. This is +//! a privileged operation. +//! * **Fungible asset**: An asset whose units are interchangeable. +//! * **Issuer**: An account ID uniquely privileged to be able to mint a particular class of assets. +//! * **Freezer**: An account ID uniquely privileged to be able to freeze an account from +//! transferring a particular class of assets. +//! * **Freezing**: Removing the possibility of an unpermissioned transfer of an asset from a +//! particular account. +//! * **Non-fungible asset**: An asset for which each unit has unique characteristics. +//! * **Owner**: An account ID uniquely privileged to be able to destroy a particular asset class, +//! or to set the Issuer, Freezer or Admin of that asset class. +//! * **Zombie**: An account which has a balance of some assets in this pallet, but no other +//! footprint on-chain, in particular no account managed in the `frame_system` pallet. //! //! ### Goals //! //! The assets system in Substrate is designed to make the following possible: //! -//! * Issue a unique asset to its creator's account. +//! * Issue a new assets in a permissioned or permissionless way, if permissionless, then with a +//! deposit required. +//! * Allow accounts to hold these assets without otherwise existing on-chain (*zombies*). //! * Move assets between accounts. -//! * Remove an account's balance of an asset when requested by that account's owner and update -//! the asset's total supply. +//! * Update the asset's total supply. +//! * Allow administrative activities by specially privileged accounts including freezing account +//! balances and minting/burning assets. //! //! ## Interface //! -//! ### Dispatchable Functions +//! ### Permissionless Functions //! -//! * `issue` - Issues the total supply of a new fungible asset to the account of the caller of the function. -//! * `transfer` - Transfers an `amount` of units of fungible asset `id` from the balance of -//! the function caller's account (`origin`) to a `target` account. -//! * `destroy` - Destroys the entire holding of a fungible asset `id` associated with the account -//! that called the function. +//! 
* `create`: Creates a new asset class, taking the required deposit. +//! * `transfer`: Transfer sender's assets to another account. +//! +//! ### Permissioned Functions +//! +//! * `force_create`: Creates a new asset class without taking any deposit. +//! * `force_destroy`: Destroys an asset class. +//! +//! ### Privileged Functions +//! * `destroy`: Destroys an entire asset class; called by the asset class's Owner. +//! * `mint`: Increases the asset balance of an account; called by the asset class's Issuer. +//! * `burn`: Decreases the asset balance of an account; called by the asset class's Admin. +//! * `force_transfer`: Transfers between arbitrary accounts; called by the asset class's Admin. +//! * `freeze`: Disallows further `transfer`s from an account; called by the asset class's Freezer. +//! * `thaw`: Allows further `transfer`s from an account; called by the asset class's Admin. +//! * `transfer_ownership`: Changes an asset class's Owner; called by the asset class's Owner. +//! * `set_team`: Changes an asset class's Admin, Freezer and Issuer; called by the asset class's +//! Owner. //! //! Please refer to the [`Call`](./enum.Call.html) enum and its associated variants for documentation on each function. //! @@ -70,61 +101,6 @@ //! //! Please refer to the [`Module`](./struct.Module.html) struct for details on publicly available functions. //! -//! ## Usage -//! -//! The following example shows how to use the Assets module in your runtime by exposing public functions to: -//! -//! * Issue a new fungible asset for a token distribution event (airdrop). -//! * Query the fungible asset holding balance of an account. -//! * Query the total supply of a fungible asset that has been issued. -//! -//! ### Prerequisites -//! -//! Import the Assets module and types and derive your runtime's configuration traits from the Assets module trait. -//! -//! ### Simple Code Snippet -//! -//! ```rust,ignore -//! use pallet_assets as assets; -//! use frame_support::{decl_module, dispatch, ensure}; -//! use frame_system::ensure_signed; -//! -//! pub trait Trait: assets::Trait { } -//! -//! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! pub fn issue_token_airdrop(origin) -> dispatch::DispatchResult { -//! let sender = ensure_signed(origin).map_err(|e| e.as_str())?; -//! -//! const ACCOUNT_ALICE: u64 = 1; -//! const ACCOUNT_BOB: u64 = 2; -//! const COUNT_AIRDROP_RECIPIENTS: u64 = 2; -//! const TOKENS_FIXED_SUPPLY: u64 = 100; -//! -//! ensure!(!COUNT_AIRDROP_RECIPIENTS.is_zero(), "Divide by zero error."); -//! -//! let asset_id = Self::next_asset_id(); -//! -//! >::mutate(|asset_id| *asset_id += 1); -//! >::insert((asset_id, &ACCOUNT_ALICE), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS); -//! >::insert((asset_id, &ACCOUNT_BOB), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS); -//! >::insert(asset_id, TOKENS_FIXED_SUPPLY); -//! -//! Self::deposit_event(RawEvent::Issued(asset_id, sender, TOKENS_FIXED_SUPPLY)); -//! Ok(()) -//! } -//! } -//! } -//! ``` -//! -//! ## Assumptions -//! -//! Below are assumptions that must be held when using this module. If any of -//! them are violated, the behavior of this module is undefined. -//! -//! * The total count of assets should be less than -//! `Trait::AssetId::max_value()`. -//! //! ## Related Modules //! //! * [`System`](../frame_system/index.html) @@ -133,148 +109,795 @@ // Ensure we're `no_std` when compiling for Wasm. 
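
Editor's note: the `Config` trait that follows adds several new associated types (currency, force origin, deposit constants, weights). The fragment below is not part of the diff and is not self-contained: `Test`, `Balances`, `Event`, the constant values and `type WeightInfo = ()` are assumptions about a mock runtime, with `construct_runtime!` and the system/balances config omitted. It is shown only to illustrate which items the reworked trait asks for.

parameter_types! {
    // `create` reserves AssetDepositBase + max_zombies * AssetDepositPerZombie.
    pub const AssetDepositBase: u64 = 1;
    pub const AssetDepositPerZombie: u64 = 1;
}

impl pallet_assets::Config for Test {
    type Event = Event;
    type Balance = u64;
    type AssetId = u32;
    type Currency = Balances;
    type ForceOrigin = frame_system::EnsureRoot<u64>;
    type AssetDepositBase = AssetDepositBase;
    type AssetDepositPerZombie = AssetDepositPerZombie;
    type WeightInfo = ();
}
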
#![cfg_attr(not(feature = "std"), no_std)] -use frame_support::{Parameter, decl_module, decl_event, decl_storage, decl_error, ensure}; -use sp_runtime::traits::{Member, AtLeast32Bit, AtLeast32BitUnsigned, Zero, StaticLookup}; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +pub mod weights; + +use sp_std::{fmt::Debug}; +use sp_runtime::{RuntimeDebug, traits::{ + Member, AtLeast32BitUnsigned, Zero, StaticLookup, Saturating, CheckedSub, CheckedAdd +}}; +use codec::{Encode, Decode, HasCompact}; +use frame_support::{Parameter, decl_module, decl_event, decl_storage, decl_error, ensure, + traits::{Currency, ReservableCurrency, EnsureOrigin, Get, BalanceStatus::Reserved}, + dispatch::{DispatchResult, DispatchError}, +}; use frame_system::ensure_signed; -use sp_runtime::traits::One; +pub use weights::WeightInfo; + +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// The module configuration trait. -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The units in which we record balances. type Balance: Member + Parameter + AtLeast32BitUnsigned + Default + Copy; /// The arithmetic type of asset identifier. - type AssetId: Parameter + AtLeast32Bit + Default + Copy; -} + type AssetId: Member + Parameter + Default + Copy + HasCompact; -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; + /// The currency mechanism. + type Currency: ReservableCurrency; - fn deposit_event() = default; - /// Issue a new class of fungible assets. There are, and will only ever be, `total` - /// such assets and they'll all belong to the `origin` initially. It will have an - /// identifier `AssetId` instance: this will be specified in the `Issued` event. - /// - /// # - /// - `O(1)` - /// - 1 storage mutation (codec `O(1)`). - /// - 2 storage writes (condec `O(1)`). - /// - 1 event. - /// # - #[weight = 0] - fn issue(origin, #[compact] total: T::Balance) { - let origin = ensure_signed(origin)?; + /// The origin which may forcibly create or destroy an asset. + type ForceOrigin: EnsureOrigin; - let id = Self::next_asset_id(); - >::mutate(|id| *id += One::one()); + /// The basic amount of funds that must be reserved when creating a new asset class. + type AssetDepositBase: Get>; - >::insert((id, &origin), total); - >::insert(id, total); + /// The additional funds that must be reserved for every zombie account that an asset class + /// supports. + type AssetDepositPerZombie: Get>; - Self::deposit_event(RawEvent::Issued(id, origin, total)); - } + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; +} - /// Move some assets from one holder to another. - /// - /// # - /// - `O(1)` - /// - 1 static lookup - /// - 2 storage mutations (codec `O(1)`). - /// - 1 event. 
- /// # - #[weight = 0] - fn transfer(origin, - #[compact] id: T::AssetId, - target: ::Source, - #[compact] amount: T::Balance - ) { - let origin = ensure_signed(origin)?; - let origin_account = (id, origin.clone()); - let origin_balance = >::get(&origin_account); - let target = T::Lookup::lookup(target)?; - ensure!(!amount.is_zero(), Error::::AmountZero); - ensure!(origin_balance >= amount, Error::::BalanceLow); +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] +pub struct AssetDetails< + Balance: Encode + Decode + Clone + Debug + Eq + PartialEq, + AccountId: Encode + Decode + Clone + Debug + Eq + PartialEq, + DepositBalance: Encode + Decode + Clone + Debug + Eq + PartialEq, +> { + /// Can change `owner`, `issuer`, `freezer` and `admin` accounts. + owner: AccountId, + /// Can mint tokens. + issuer: AccountId, + /// Can thaw tokens, force transfers and burn tokens from any account. + admin: AccountId, + /// Can freeze tokens. + freezer: AccountId, + /// The total supply across all accounts. + supply: Balance, + /// The balance deposited for this asset. + /// + /// This pays for the data stored here together with any virtual accounts. + deposit: DepositBalance, + /// The number of balance-holding accounts that this asset may have, excluding those that were + /// created when they had a system-level ED. + max_zombies: u32, + /// The ED for virtual accounts. + min_balance: Balance, + /// The current number of zombie accounts. + zombies: u32, + /// The total number of accounts. + accounts: u32, +} - Self::deposit_event(RawEvent::Transferred(id, origin, target.clone(), amount)); - >::insert(origin_account, origin_balance - amount); - >::mutate((id, target), |balance| *balance += amount); - } +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] +pub struct AssetBalance< + Balance: Encode + Decode + Clone + Debug + Eq + PartialEq, +> { + /// The balance. + balance: Balance, + /// Whether the account is frozen. + is_frozen: bool, + /// Whether the account is a zombie. If not, then it has a reference. + is_zombie: bool, +} - /// Destroy any assets of `id` owned by `origin`. - /// - /// # - /// - `O(1)` - /// - 1 storage mutation (codec `O(1)`). - /// - 1 storage deletion (codec `O(1)`). - /// - 1 event. - /// # - #[weight = 0] - fn destroy(origin, #[compact] id: T::AssetId) { - let origin = ensure_signed(origin)?; - let balance = >::take((id, &origin)); - ensure!(!balance.is_zero(), Error::::BalanceZero); +decl_storage! { + trait Store for Module as Assets { + /// Details of an asset. + Asset: map hasher(blake2_128_concat) T::AssetId => Option, + >>; - >::mutate(id, |total_supply| *total_supply -= balance); - Self::deposit_event(RawEvent::Destroyed(id, origin, balance)); - } + /// The number of units of assets held by any given account. + Account: double_map + hasher(blake2_128_concat) T::AssetId, + hasher(blake2_128_concat) T::AccountId + => AssetBalance; } } decl_event! { pub enum Event where - ::AccountId, - ::Balance, - ::AssetId, + ::AccountId, + ::Balance, + ::AssetId, { + /// Some asset class was created. \[asset_id, creator, owner\] + Created(AssetId, AccountId, AccountId), /// Some assets were issued. \[asset_id, owner, total_supply\] Issued(AssetId, AccountId, Balance), /// Some assets were transferred. \[asset_id, from, to, amount\] Transferred(AssetId, AccountId, AccountId, Balance), /// Some assets were destroyed. 
\[asset_id, owner, balance\] - Destroyed(AssetId, AccountId, Balance), + Burned(AssetId, AccountId, Balance), + /// The management team changed \[asset_id, issuer, admin, freezer\] + TeamChanged(AssetId, AccountId, AccountId, AccountId), + /// The owner changed \[asset_id, owner\] + OwnerChanged(AssetId, AccountId), + /// Some assets was transferred by an admin. \[asset_id, from, to, amount\] + ForceTransferred(AssetId, AccountId, AccountId, Balance), + /// Some account `who` was frozen. \[asset_id, who\] + Frozen(AssetId, AccountId), + /// Some account `who` was thawed. \[asset_id, who\] + Thawed(AssetId, AccountId), + /// An asset class was destroyed. + Destroyed(AssetId), + /// Some asset class was force-created. \[asset_id, owner\] + ForceCreated(AssetId, AccountId), + /// The maximum amount of zombies allowed has changed. \[asset_id, max_zombies\] + MaxZombiesChanged(AssetId, u32), } } decl_error! { - pub enum Error for Module { - /// Transfer amount should be non-zero + pub enum Error for Module { + /// Transfer amount should be non-zero. AmountZero, - /// Account balance must be greater than or equal to the transfer amount + /// Account balance must be greater than or equal to the transfer amount. BalanceLow, - /// Balance should be non-zero + /// Balance should be non-zero. BalanceZero, + /// The signing account has no permission to do the operation. + NoPermission, + /// The given asset ID is unknown. + Unknown, + /// The origin account is frozen. + Frozen, + /// The asset ID is already taken. + InUse, + /// Too many zombie accounts in use. + TooManyZombies, + /// Attempt to destroy an asset class when non-zombie, reference-bearing accounts exist. + RefsLeft, + /// Invalid witness data given. + BadWitness, + /// Minimum balance should be non-zero. + MinBalanceZero, + /// A mint operation lead to an overflow. + Overflow, } } -decl_storage! { - trait Store for Module as Assets { - /// The number of units of assets held by any given account. - Balances: map hasher(blake2_128_concat) (T::AssetId, T::AccountId) => T::Balance; - /// The next asset identifier up for grabs. - NextAssetId get(fn next_asset_id): T::AssetId; - /// The total unit supply of an asset. +decl_module! { + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + fn deposit_event() = default; + + /// Issue a new class of fungible assets from a public origin. /// - /// TWOX-NOTE: `AssetId` is trusted, so this is safe. - TotalSupply: map hasher(twox_64_concat) T::AssetId => T::Balance; + /// This new asset class has no assets initially. + /// + /// The origin must be Signed and the sender must have sufficient funds free. + /// + /// Funds of sender are reserved according to the formula: + /// `AssetDepositBase + AssetDepositPerZombie * max_zombies`. + /// + /// Parameters: + /// - `id`: The identifier of the new asset. This must not be currently in use to identify + /// an existing asset. + /// - `owner`: The owner of this class of assets. The owner has full superuser permissions + /// over this asset, but may later change and configure the permissions using `transfer_ownership` + /// and `set_team`. + /// - `max_zombies`: The total number of accounts which may hold assets in this class yet + /// have no existential deposit. + /// - `min_balance`: The minimum balance of this new asset that any single account must + /// have. If an account's balance is reduced below this, then it collapses to zero. + /// + /// Emits `Created` event when successful. 
+ /// + /// Weight: `O(1)` + #[weight = T::WeightInfo::create()] + fn create(origin, + #[compact] id: T::AssetId, + admin: ::Source, + max_zombies: u32, + min_balance: T::Balance, + ) { + let owner = ensure_signed(origin)?; + let admin = T::Lookup::lookup(admin)?; + + ensure!(!Asset::::contains_key(id), Error::::InUse); + ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); + + let deposit = T::AssetDepositPerZombie::get() + .saturating_mul(max_zombies.into()) + .saturating_add(T::AssetDepositBase::get()); + T::Currency::reserve(&owner, deposit)?; + + Asset::::insert(id, AssetDetails { + owner: owner.clone(), + issuer: admin.clone(), + admin: admin.clone(), + freezer: admin.clone(), + supply: Zero::zero(), + deposit, + max_zombies, + min_balance, + zombies: Zero::zero(), + accounts: Zero::zero(), + }); + Self::deposit_event(RawEvent::Created(id, owner, admin)); + } + + /// Issue a new class of fungible assets from a privileged origin. + /// + /// This new asset class has no assets initially. + /// + /// The origin must conform to `ForceOrigin`. + /// + /// Unlike `create`, no funds are reserved. + /// + /// - `id`: The identifier of the new asset. This must not be currently in use to identify + /// an existing asset. + /// - `owner`: The owner of this class of assets. The owner has full superuser permissions + /// over this asset, but may later change and configure the permissions using `transfer_ownership` + /// and `set_team`. + /// - `max_zombies`: The total number of accounts which may hold assets in this class yet + /// have no existential deposit. + /// - `min_balance`: The minimum balance of this new asset that any single account must + /// have. If an account's balance is reduced below this, then it collapses to zero. + /// + /// Emits `ForceCreated` event when successful. + /// + /// Weight: `O(1)` + #[weight = T::WeightInfo::force_create()] + fn force_create(origin, + #[compact] id: T::AssetId, + owner: ::Source, + #[compact] max_zombies: u32, + #[compact] min_balance: T::Balance, + ) { + T::ForceOrigin::ensure_origin(origin)?; + let owner = T::Lookup::lookup(owner)?; + + ensure!(!Asset::::contains_key(id), Error::::InUse); + ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); + + Asset::::insert(id, AssetDetails { + owner: owner.clone(), + issuer: owner.clone(), + admin: owner.clone(), + freezer: owner.clone(), + supply: Zero::zero(), + deposit: Zero::zero(), + max_zombies, + min_balance, + zombies: Zero::zero(), + accounts: Zero::zero(), + }); + Self::deposit_event(RawEvent::ForceCreated(id, owner)); + } + + /// Destroy a class of fungible assets owned by the sender. + /// + /// The origin must be Signed and the sender must be the owner of the asset `id`. + /// + /// - `id`: The identifier of the asset to be destroyed. This must identify an existing + /// asset. + /// + /// Emits `Destroyed` event when successful. + /// + /// Weight: `O(z)` where `z` is the number of zombie accounts. 
+ #[weight = T::WeightInfo::destroy(*zombies_witness)] + fn destroy(origin, + #[compact] id: T::AssetId, + #[compact] zombies_witness: u32, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + Asset::::try_mutate_exists(id, |maybe_details| { + let details = maybe_details.take().ok_or(Error::::Unknown)?; + ensure!(details.owner == origin, Error::::NoPermission); + ensure!(details.accounts == details.zombies, Error::::RefsLeft); + ensure!(details.zombies <= zombies_witness, Error::::BadWitness); + T::Currency::unreserve(&details.owner, details.deposit); + + *maybe_details = None; + Account::::remove_prefix(&id); + Self::deposit_event(RawEvent::Destroyed(id)); + Ok(()) + }) + } + + /// Destroy a class of fungible assets. + /// + /// The origin must conform to `ForceOrigin`. + /// + /// - `id`: The identifier of the asset to be destroyed. This must identify an existing + /// asset. + /// + /// Emits `Destroyed` event when successful. + /// + /// Weight: `O(1)` + #[weight = T::WeightInfo::force_destroy(*zombies_witness)] + fn force_destroy(origin, + #[compact] id: T::AssetId, + #[compact] zombies_witness: u32, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + + Asset::::try_mutate_exists(id, |maybe_details| { + let details = maybe_details.take().ok_or(Error::::Unknown)?; + ensure!(details.accounts == details.zombies, Error::::RefsLeft); + ensure!(details.zombies <= zombies_witness, Error::::BadWitness); + T::Currency::unreserve(&details.owner, details.deposit); + + *maybe_details = None; + Account::::remove_prefix(&id); + Self::deposit_event(RawEvent::Destroyed(id)); + Ok(()) + }) + } + + /// Mint assets of a particular class. + /// + /// The origin must be Signed and the sender must be the Issuer of the asset `id`. + /// + /// - `id`: The identifier of the asset to have some amount minted. + /// - `beneficiary`: The account to be credited with the minted assets. + /// - `amount`: The amount of the asset to be minted. + /// + /// Emits `Destroyed` event when successful. + /// + /// Weight: `O(1)` + /// Modes: Pre-existing balance of `beneficiary`; Account pre-existence of `beneficiary`. + #[weight = T::WeightInfo::mint()] + fn mint(origin, + #[compact] id: T::AssetId, + beneficiary: ::Source, + #[compact] amount: T::Balance + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let beneficiary = T::Lookup::lookup(beneficiary)?; + + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + + ensure!(&origin == &details.issuer, Error::::NoPermission); + details.supply = details.supply.checked_add(&amount).ok_or(Error::::Overflow)?; + + Account::::try_mutate(id, &beneficiary, |t| -> DispatchResult { + let new_balance = t.balance.saturating_add(amount); + ensure!(new_balance >= details.min_balance, Error::::BalanceLow); + if t.balance.is_zero() { + t.is_zombie = Self::new_account(&beneficiary, details)?; + } + t.balance = new_balance; + Ok(()) + })?; + Self::deposit_event(RawEvent::Issued(id, beneficiary, amount)); + Ok(()) + }) + } + + /// Reduce the balance of `who` by as much as possible up to `amount` assets of `id`. + /// + /// Origin must be Signed and the sender should be the Manager of the asset `id`. + /// + /// Bails with `BalanceZero` if the `who` is already dead. + /// + /// - `id`: The identifier of the asset to have some amount burned. + /// - `who`: The account to be debited from. + /// - `amount`: The maximum amount by which `who`'s balance should be reduced. 
+ /// + /// Emits `Burned` with the actual amount burned. If this takes the balance to below the + /// minimum for the asset, then the amount burned is increased to take it to zero. + /// + /// Weight: `O(1)` + /// Modes: Post-existence of `who`; Pre & post Zombie-status of `who`. + #[weight = T::WeightInfo::burn()] + fn burn(origin, + #[compact] id: T::AssetId, + who: ::Source, + #[compact] amount: T::Balance + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let who = T::Lookup::lookup(who)?; + + Asset::::try_mutate(id, |maybe_details| { + let d = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &d.admin, Error::::NoPermission); + + let burned = Account::::try_mutate_exists( + id, + &who, + |maybe_account| -> Result { + let mut account = maybe_account.take().ok_or(Error::::BalanceZero)?; + let mut burned = amount.min(account.balance); + account.balance -= burned; + *maybe_account = if account.balance < d.min_balance { + burned += account.balance; + Self::dead_account(&who, d, account.is_zombie); + None + } else { + Some(account) + }; + Ok(burned) + } + )?; + + d.supply = d.supply.saturating_sub(burned); + + Self::deposit_event(RawEvent::Burned(id, who, burned)); + Ok(()) + }) + } + + /// Move some assets from the sender account to another. + /// + /// Origin must be Signed. + /// + /// - `id`: The identifier of the asset to have some amount transferred. + /// - `target`: The account to be credited. + /// - `amount`: The amount by which the sender's balance of assets should be reduced and + /// `target`'s balance increased. The amount actually transferred may be slightly greater in + /// the case that the transfer would otherwise take the sender balance above zero but below + /// the minimum balance. Must be greater than zero. + /// + /// Emits `Transferred` with the actual amount transferred. If this takes the source balance + /// to below the minimum for the asset, then the amount transferred is increased to take it + /// to zero. + /// + /// Weight: `O(1)` + /// Modes: Pre-existence of `target`; Post-existence of sender; Prior & post zombie-status + /// of sender; Account pre-existence of `target`. 
+ #[weight = T::WeightInfo::transfer()] + fn transfer(origin, + #[compact] id: T::AssetId, + target: ::Source, + #[compact] amount: T::Balance + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + ensure!(!amount.is_zero(), Error::::AmountZero); + + let mut origin_account = Account::::get(id, &origin); + ensure!(!origin_account.is_frozen, Error::::Frozen); + origin_account.balance = origin_account.balance.checked_sub(&amount) + .ok_or(Error::::BalanceLow)?; + + let dest = T::Lookup::lookup(target)?; + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + + if dest == origin { + return Ok(()) + } + + let mut amount = amount; + if origin_account.balance < details.min_balance { + amount += origin_account.balance; + origin_account.balance = Zero::zero(); + } + + Account::::try_mutate(id, &dest, |a| -> DispatchResult { + let new_balance = a.balance.saturating_add(amount); + ensure!(new_balance >= details.min_balance, Error::::BalanceLow); + if a.balance.is_zero() { + a.is_zombie = Self::new_account(&dest, details)?; + } + a.balance = new_balance; + Ok(()) + })?; + + match origin_account.balance.is_zero() { + false => { + Self::dezombify(&origin, details, &mut origin_account.is_zombie); + Account::::insert(id, &origin, &origin_account) + } + true => { + Self::dead_account(&origin, details, origin_account.is_zombie); + Account::::remove(id, &origin); + } + } + + Self::deposit_event(RawEvent::Transferred(id, origin, dest, amount)); + Ok(()) + }) + } + + /// Move some assets from one account to another. + /// + /// Origin must be Signed and the sender should be the Admin of the asset `id`. + /// + /// - `id`: The identifier of the asset to have some amount transferred. + /// - `source`: The account to be debited. + /// - `dest`: The account to be credited. + /// - `amount`: The amount by which the `source`'s balance of assets should be reduced and + /// `dest`'s balance increased. The amount actually transferred may be slightly greater in + /// the case that the transfer would otherwise take the `source` balance above zero but + /// below the minimum balance. Must be greater than zero. + /// + /// Emits `Transferred` with the actual amount transferred. If this takes the source balance + /// to below the minimum for the asset, then the amount transferred is increased to take it + /// to zero. + /// + /// Weight: `O(1)` + /// Modes: Pre-existence of `dest`; Post-existence of `source`; Prior & post zombie-status + /// of `source`; Account pre-existence of `dest`. 
+ #[weight = T::WeightInfo::force_transfer()] + fn force_transfer(origin, + #[compact] id: T::AssetId, + source: ::Source, + dest: ::Source, + #[compact] amount: T::Balance, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + let source = T::Lookup::lookup(source)?; + let mut source_account = Account::::get(id, &source); + let mut amount = amount.min(source_account.balance); + ensure!(!amount.is_zero(), Error::::AmountZero); + + let dest = T::Lookup::lookup(dest)?; + if dest == source { + return Ok(()) + } + + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.admin, Error::::NoPermission); + + source_account.balance -= amount; + if source_account.balance < details.min_balance { + amount += source_account.balance; + source_account.balance = Zero::zero(); + } + + Account::::try_mutate(id, &dest, |a| -> DispatchResult { + let new_balance = a.balance.saturating_add(amount); + ensure!(new_balance >= details.min_balance, Error::::BalanceLow); + if a.balance.is_zero() { + a.is_zombie = Self::new_account(&dest, details)?; + } + a.balance = new_balance; + Ok(()) + })?; + + match source_account.balance.is_zero() { + false => { + Self::dezombify(&source, details, &mut source_account.is_zombie); + Account::::insert(id, &source, &source_account) + } + true => { + Self::dead_account(&source, details, source_account.is_zombie); + Account::::remove(id, &source); + } + } + + Self::deposit_event(RawEvent::ForceTransferred(id, source, dest, amount)); + Ok(()) + }) + } + + /// Disallow further unprivileged transfers from an account. + /// + /// Origin must be Signed and the sender should be the Freezer of the asset `id`. + /// + /// - `id`: The identifier of the asset to be frozen. + /// - `who`: The account to be frozen. + /// + /// Emits `Frozen`. + /// + /// Weight: `O(1)` + #[weight = T::WeightInfo::freeze()] + fn freeze(origin, #[compact] id: T::AssetId, who: ::Source) { + let origin = ensure_signed(origin)?; + + let d = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(&origin == &d.freezer, Error::::NoPermission); + let who = T::Lookup::lookup(who)?; + ensure!(Account::::contains_key(id, &who), Error::::BalanceZero); + + Account::::mutate(id, &who, |a| a.is_frozen = true); + + Self::deposit_event(Event::::Frozen(id, who)); + } + + /// Allow unprivileged transfers from an account again. + /// + /// Origin must be Signed and the sender should be the Admin of the asset `id`. + /// + /// - `id`: The identifier of the asset to be frozen. + /// - `who`: The account to be unfrozen. + /// + /// Emits `Thawed`. + /// + /// Weight: `O(1)` + #[weight = T::WeightInfo::thaw()] + fn thaw(origin, #[compact] id: T::AssetId, who: ::Source) { + let origin = ensure_signed(origin)?; + + let details = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(&origin == &details.admin, Error::::NoPermission); + let who = T::Lookup::lookup(who)?; + ensure!(Account::::contains_key(id, &who), Error::::BalanceZero); + + Account::::mutate(id, &who, |a| a.is_frozen = false); + + Self::deposit_event(Event::::Thawed(id, who)); + } + + /// Change the Owner of an asset. + /// + /// Origin must be Signed and the sender should be the Owner of the asset `id`. + /// + /// - `id`: The identifier of the asset to be frozen. + /// - `owner`: The new Owner of this asset. + /// + /// Emits `OwnerChanged`. 
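`freeze` and `thaw` above only flip a per-account flag; the enforcement is the `is_frozen` guard at the top of `transfer`, and (as the tests later confirm) a frozen account can still receive funds. A minimal standalone model of that guard, using illustrative names rather than the pallet's storage types:

```rust
/// Simplified per-asset account record, mirroring the fields used by the
/// freeze/thaw logic above (illustrative, not the pallet's storage type).
struct AccountRecord {
    balance: u64,
    is_frozen: bool,
}

/// The transfer guard: a frozen sender may not move funds, but freezing does
/// not prevent the account from receiving transfers.
fn can_send(sender: &AccountRecord, amount: u64) -> Result<(), &'static str> {
    if sender.is_frozen {
        return Err("Frozen");
    }
    if sender.balance < amount {
        return Err("BalanceLow");
    }
    Ok(())
}

fn main() {
    let mut acc = AccountRecord { balance: 100, is_frozen: false };
    assert!(can_send(&acc, 50).is_ok());
    acc.is_frozen = true;  // freeze(...)
    assert_eq!(can_send(&acc, 50), Err("Frozen"));
    acc.is_frozen = false; // thaw(...)
    assert!(can_send(&acc, 50).is_ok());
}
```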
+ /// + /// Weight: `O(1)` + #[weight = T::WeightInfo::transfer_ownership()] + fn transfer_ownership(origin, + #[compact] id: T::AssetId, + owner: ::Source, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let owner = T::Lookup::lookup(owner)?; + + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.owner, Error::::NoPermission); + if details.owner == owner { return Ok(()) } + + // Move the deposit to the new owner. + T::Currency::repatriate_reserved(&details.owner, &owner, details.deposit, Reserved)?; + + details.owner = owner.clone(); + + Self::deposit_event(RawEvent::OwnerChanged(id, owner)); + Ok(()) + }) + } + + /// Change the Issuer, Admin and Freezer of an asset. + /// + /// Origin must be Signed and the sender should be the Owner of the asset `id`. + /// + /// - `id`: The identifier of the asset to be frozen. + /// - `issuer`: The new Issuer of this asset. + /// - `admin`: The new Admin of this asset. + /// - `freezer`: The new Freezer of this asset. + /// + /// Emits `TeamChanged`. + /// + /// Weight: `O(1)` + #[weight = T::WeightInfo::set_team()] + fn set_team(origin, + #[compact] id: T::AssetId, + issuer: ::Source, + admin: ::Source, + freezer: ::Source, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let issuer = T::Lookup::lookup(issuer)?; + let admin = T::Lookup::lookup(admin)?; + let freezer = T::Lookup::lookup(freezer)?; + + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.owner, Error::::NoPermission); + + details.issuer = issuer.clone(); + details.admin = admin.clone(); + details.freezer = freezer.clone(); + + Self::deposit_event(RawEvent::TeamChanged(id, issuer, admin, freezer)); + Ok(()) + }) + } + + #[weight = T::WeightInfo::set_max_zombies()] + fn set_max_zombies(origin, + #[compact] id: T::AssetId, + #[compact] max_zombies: u32, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.owner, Error::::NoPermission); + ensure!(max_zombies >= details.zombies, Error::::TooManyZombies); + + let new_deposit = T::AssetDepositPerZombie::get() + .saturating_mul(max_zombies.into()) + .saturating_add(T::AssetDepositBase::get()); + + if new_deposit > details.deposit { + T::Currency::reserve(&origin, new_deposit - details.deposit)?; + } else { + T::Currency::unreserve(&origin, details.deposit - new_deposit); + } + + details.max_zombies = max_zombies; + + Self::deposit_event(RawEvent::MaxZombiesChanged(id, max_zombies)); + Ok(()) + }) + } } } // The main implementation block for the module. -impl Module { +impl Module { // Public immutables /// Get the asset `id` balance of `who`. pub fn balance(id: T::AssetId, who: T::AccountId) -> T::Balance { - >::get((id, who)) + Account::::get(id, who).balance } /// Get the total supply of an asset `id`. pub fn total_supply(id: T::AssetId) -> T::Balance { - >::get(id) + Asset::::get(id).map(|x| x.supply).unwrap_or_else(Zero::zero) + } + + /// Check the number of zombies allow yet for an asset. 
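`set_max_zombies` above re-prices the owner's deposit as `AssetDepositBase + AssetDepositPerZombie * max_zombies` and only reserves or unreserves the difference. A standalone sketch of that arithmetic; the constants of 1 and 1 mirror the values used by the test mock further down, not mandated runtime parameters:

```rust
/// Deposit schedule used by this sketch; real values come from the runtime's
/// `AssetDepositBase` / `AssetDepositPerZombie` parameters.
const DEPOSIT_BASE: u64 = 1;
const DEPOSIT_PER_ZOMBIE: u64 = 1;

/// Returns the new required deposit and the signed adjustment:
/// positive => extra amount to reserve, negative => amount to unreserve.
fn reprice_deposit(current_deposit: u64, max_zombies: u64) -> (u64, i64) {
    let new_deposit = DEPOSIT_PER_ZOMBIE
        .saturating_mul(max_zombies)
        .saturating_add(DEPOSIT_BASE);
    (new_deposit, new_deposit as i64 - current_deposit as i64)
}

fn main() {
    // Creating with max_zombies = 10 reserves 11 (as in the lifecycle test below).
    assert_eq!(reprice_deposit(0, 10), (11, 11));
    // Raising the limit to 12 reserves only the 2-unit difference.
    assert_eq!(reprice_deposit(11, 12), (13, 2));
    // Lowering it to 3 unreserves 7.
    assert_eq!(reprice_deposit(11, 3), (4, -7));
}
```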
+ pub fn zombie_allowance(id: T::AssetId) -> u32 { + Asset::::get(id).map(|x| x.max_zombies - x.zombies).unwrap_or_else(Zero::zero) + } + + fn new_account( + who: &T::AccountId, + d: &mut AssetDetails>, + ) -> Result { + let accounts = d.accounts.checked_add(1).ok_or(Error::::Overflow)?; + let r = Ok(if frame_system::Module::::account_exists(who) { + frame_system::Module::::inc_ref(who); + false + } else { + ensure!(d.zombies < d.max_zombies, Error::::TooManyZombies); + d.zombies += 1; + true + }); + d.accounts = accounts; + r + } + + /// If `who`` exists in system and it's a zombie, dezombify it. + fn dezombify( + who: &T::AccountId, + d: &mut AssetDetails>, + is_zombie: &mut bool, + ) { + if *is_zombie && frame_system::Module::::account_exists(who) { + frame_system::Module::::inc_ref(who); + *is_zombie = false; + d.zombies = d.zombies.saturating_sub(1); + } + } + + fn dead_account( + who: &T::AccountId, + d: &mut AssetDetails>, + is_zombie: bool, + ) { + if is_zombie { + d.zombies = d.zombies.saturating_sub(1); + } else { + frame_system::Module::::dec_ref(who); + } + d.accounts = d.accounts.saturating_sub(1); } } @@ -282,9 +905,21 @@ impl Module { mod tests { use super::*; - use frame_support::{impl_outer_origin, assert_ok, assert_noop, parameter_types, weights::Weight}; + use frame_support::{impl_outer_origin, assert_ok, assert_noop, parameter_types, impl_outer_event}; use sp_core::H256; - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; + use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; + + mod pallet_assets { + pub use crate::Event; + } + + impl_outer_event! { + pub enum Event for Test { + frame_system, + pallet_balances, + pallet_assets, + } + } impl_outer_origin! { pub enum Origin for Test where system = frame_system {} @@ -294,12 +929,12 @@ mod tests { pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type Call = (); @@ -309,45 +944,203 @@ mod tests { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); type PalletInfo = (); - type AccountData = (); + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); } - impl Trait for Test { - type Event = (); + + parameter_types! { + pub const ExistentialDeposit: u64 = 1; + } + + impl pallet_balances::Config for Test { + type MaxLocks = (); + type Balance = u64; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + } + + parameter_types! 
{ + pub const AssetDepositBase: u64 = 1; + pub const AssetDepositPerZombie: u64 = 1; + } + + impl Config for Test { + type Currency = Balances; + type Event = Event; type Balance = u64; type AssetId = u32; + type ForceOrigin = frame_system::EnsureRoot; + type AssetDepositBase = AssetDepositBase; + type AssetDepositPerZombie = AssetDepositPerZombie; + type WeightInfo = (); } + type System = frame_system::Module; + type Balances = pallet_balances::Module; type Assets = Module; - fn new_test_ext() -> sp_io::TestExternalities { + pub(crate) fn new_test_ext() -> sp_io::TestExternalities { frame_system::GenesisConfig::default().build_storage::().unwrap().into() } #[test] - fn issuing_asset_units_to_issuer_should_work() { + fn basic_minting_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 100)); + assert_eq!(Assets::balance(0, 2), 100); + }); + } + + #[test] + fn lifecycle_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::create(Origin::signed(1), 0, 1, 10, 1)); + assert_eq!(Balances::reserved_balance(&1), 11); + + assert_ok!(Assets::destroy(Origin::signed(1), 0, 100)); + assert_eq!(Balances::reserved_balance(&1), 0); + + assert_ok!(Assets::create(Origin::signed(1), 0, 1, 10, 1)); + assert_eq!(Balances::reserved_balance(&1), 11); + + assert_ok!(Assets::force_destroy(Origin::root(), 0, 100)); + assert_eq!(Balances::reserved_balance(&1), 0); + }); + } + + #[test] + fn destroy_with_non_zombies_should_not_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_noop!(Assets::destroy(Origin::signed(1), 0, 100), Error::::RefsLeft); + assert_noop!(Assets::force_destroy(Origin::root(), 0, 100), Error::::RefsLeft); + assert_ok!(Assets::burn(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::destroy(Origin::signed(1), 0, 100)); + }); + } + + #[test] + fn destroy_with_bad_witness_should_not_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); + assert_noop!(Assets::destroy(Origin::signed(1), 0, 0), Error::::BadWitness); + assert_noop!(Assets::force_destroy(Origin::root(), 0, 0), Error::::BadWitness); + }); + } + + #[test] + fn max_zombies_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 2, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 0, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + + assert_eq!(Assets::zombie_allowance(0), 0); + assert_noop!(Assets::mint(Origin::signed(1), 0, 2, 100), Error::::TooManyZombies); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 50), Error::::TooManyZombies); + assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 50), Error::::TooManyZombies); + + Balances::make_free_balance_be(&3, 100); + assert_ok!(Assets::mint(Origin::signed(1), 0, 3, 100)); + + assert_ok!(Assets::transfer(Origin::signed(0), 0, 1, 100)); + assert_eq!(Assets::zombie_allowance(0), 1); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + }); + } + + #[test] + fn resetting_max_zombies_should_work() { + 
new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 2, 1)); + Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 3, 100)); + + assert_eq!(Assets::zombie_allowance(0), 0); + + assert_noop!(Assets::set_max_zombies(Origin::signed(1), 0, 1), Error::::TooManyZombies); + + assert_ok!(Assets::set_max_zombies(Origin::signed(1), 0, 3)); + assert_eq!(Assets::zombie_allowance(0), 1); + }); + } + + #[test] + fn dezombifying_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 10)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::zombie_allowance(0), 9); + + // introduce a bit of balance for account 2. + Balances::make_free_balance_be(&2, 100); + + // transfer 25 units, nothing changes. + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 25)); + assert_eq!(Assets::zombie_allowance(0), 9); + + // introduce a bit of balance; this will create the account. + Balances::make_free_balance_be(&1, 100); + + // now transferring 25 units will create it. + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 25)); + assert_eq!(Assets::zombie_allowance(0), 10); + }); + } + + #[test] + fn min_balance_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 10)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Asset::::get(0).unwrap().accounts, 1); + + // Cannot create a new account with a balance that is below minimum... + assert_noop!(Assets::mint(Origin::signed(1), 0, 2, 9), Error::::BalanceLow); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 9), Error::::BalanceLow); + assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 9), Error::::BalanceLow); + + // When deducting from an account to below minimum, it should be reaped. 
+ + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 91)); + assert!(Assets::balance(0, 1).is_zero()); + assert_eq!(Assets::balance(0, 2), 100); + assert_eq!(Asset::::get(0).unwrap().accounts, 1); + + assert_ok!(Assets::force_transfer(Origin::signed(1), 0, 2, 1, 91)); + assert!(Assets::balance(0, 2).is_zero()); assert_eq!(Assets::balance(0, 1), 100); + assert_eq!(Asset::::get(0).unwrap().accounts, 1); + + assert_ok!(Assets::burn(Origin::signed(1), 0, 1, 91)); + assert!(Assets::balance(0, 1).is_zero()); + assert_eq!(Asset::::get(0).unwrap().accounts, 0); }); } #[test] fn querying_total_supply_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Assets::balance(0, 1), 50); @@ -356,15 +1149,16 @@ mod tests { assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 2), 19); assert_eq!(Assets::balance(0, 3), 31); - assert_ok!(Assets::destroy(Origin::signed(3), 0)); + assert_ok!(Assets::burn(Origin::signed(1), 0, 3, u64::max_value())); assert_eq!(Assets::total_supply(0), 69); }); } #[test] - fn transferring_amount_above_available_balance_should_work() { + fn transferring_amount_below_available_balance_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Assets::balance(0, 1), 50); @@ -372,24 +1166,106 @@ mod tests { }); } + #[test] + fn transferring_frozen_balance_should_not_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::freeze(Origin::signed(1), 0, 1)); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 50), Error::::Frozen); + assert_ok!(Assets::thaw(Origin::signed(1), 0, 1)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + }); + } + + #[test] + fn origin_guards_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_noop!(Assets::transfer_ownership(Origin::signed(2), 0, 2), Error::::NoPermission); + assert_noop!(Assets::set_team(Origin::signed(2), 0, 2, 2, 2), Error::::NoPermission); + assert_noop!(Assets::freeze(Origin::signed(2), 0, 1), Error::::NoPermission); + assert_noop!(Assets::thaw(Origin::signed(2), 0, 2), Error::::NoPermission); + assert_noop!(Assets::mint(Origin::signed(2), 0, 2, 100), Error::::NoPermission); + assert_noop!(Assets::burn(Origin::signed(2), 0, 1, 100), Error::::NoPermission); + assert_noop!(Assets::force_transfer(Origin::signed(2), 0, 1, 2, 100), Error::::NoPermission); + assert_noop!(Assets::set_max_zombies(Origin::signed(2), 0, 11), Error::::NoPermission); + assert_noop!(Assets::destroy(Origin::signed(2), 0, 100), Error::::NoPermission); + }); + } + + #[test] + fn transfer_owner_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 1); + assert_ok!(Assets::create(Origin::signed(1), 0, 1, 10, 1)); 
+ + assert_eq!(Balances::reserved_balance(&1), 11); + + assert_ok!(Assets::transfer_ownership(Origin::signed(1), 0, 2)); + assert_eq!(Balances::reserved_balance(&2), 11); + assert_eq!(Balances::reserved_balance(&1), 0); + + assert_noop!(Assets::transfer_ownership(Origin::signed(1), 0, 1), Error::::NoPermission); + + assert_ok!(Assets::transfer_ownership(Origin::signed(2), 0, 1)); + assert_eq!(Balances::reserved_balance(&1), 11); + assert_eq!(Balances::reserved_balance(&2), 0); + }); + } + + #[test] + fn set_team_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::set_team(Origin::signed(1), 0, 2, 3, 4)); + + assert_ok!(Assets::mint(Origin::signed(2), 0, 2, 100)); + assert_ok!(Assets::freeze(Origin::signed(4), 0, 2)); + assert_ok!(Assets::thaw(Origin::signed(3), 0, 2)); + assert_ok!(Assets::force_transfer(Origin::signed(3), 0, 2, 3, 100)); + assert_ok!(Assets::burn(Origin::signed(3), 0, 3, 100)); + }); + } + + #[test] + fn transferring_to_frozen_account_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_eq!(Assets::balance(0, 2), 100); + assert_ok!(Assets::freeze(Origin::signed(1), 0, 2)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Assets::balance(0, 2), 150); + }); + } + #[test] fn transferring_amount_more_than_available_balance_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 2), 50); - assert_ok!(Assets::destroy(Origin::signed(1), 0)); + assert_ok!(Assets::burn(Origin::signed(1), 0, 1, u64::max_value())); assert_eq!(Assets::balance(0, 1), 0); assert_noop!(Assets::transfer(Origin::signed(1), 0, 1, 50), Error::::BalanceLow); + assert_noop!(Assets::transfer(Origin::signed(2), 0, 1, 51), Error::::BalanceLow); }); } #[test] fn transferring_less_than_one_unit_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 0), Error::::AmountZero); }); @@ -398,27 +1274,31 @@ mod tests { #[test] fn transferring_more_units_than_total_supply_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 101), Error::::BalanceLow); }); } #[test] - fn destroying_asset_balance_with_positive_balance_should_work() { + fn burning_asset_balance_with_positive_balance_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); 
assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::destroy(Origin::signed(1), 0)); + assert_ok!(Assets::burn(Origin::signed(1), 0, 1, u64::max_value())); + assert_eq!(Assets::balance(0, 1), 0); }); } #[test] - fn destroying_asset_balance_with_zero_balance_should_not_work() { + fn burning_asset_balance_with_zero_balance_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, 10, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 2), 0); - assert_noop!(Assets::destroy(Origin::signed(2), 0), Error::::BalanceZero); + assert_noop!(Assets::burn(Origin::signed(1), 0, 2, u64::max_value()), Error::::BalanceZero); }); } } diff --git a/frame/assets/src/weights.rs b/frame/assets/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..f6408e527f51b3a1cee059c68d6f2e2ef0543c33 --- /dev/null +++ b/frame/assets/src/weights.rs @@ -0,0 +1,207 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_assets +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 +//! DATE: 2020-12-03, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_assets +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/assets/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_assets. +pub trait WeightInfo { + fn create() -> Weight; + fn force_create() -> Weight; + fn destroy(z: u32, ) -> Weight; + fn force_destroy(z: u32, ) -> Weight; + fn mint() -> Weight; + fn burn() -> Weight; + fn transfer() -> Weight; + fn force_transfer() -> Weight; + fn freeze() -> Weight; + fn thaw() -> Weight; + fn transfer_ownership() -> Weight; + fn set_team() -> Weight; + fn set_max_zombies() -> Weight; +} + +/// Weights for pallet_assets using the Substrate node and recommended hardware. 
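Each generated function below composes a constant base weight with per-read and per-write database costs (and, for `destroy`/`force_destroy`, a per-zombie component). A standalone model of that composition; the database costs shown are assumptions for illustration, not the runtime's configured `DbWeight`:

```rust
/// Simplified model of how the generated weight functions below are built:
/// base extrinsic time plus per-read / per-write database costs.
#[derive(Clone, Copy)]
struct DbWeight {
    read: u64,
    write: u64,
}

fn weight(base: u64, db: DbWeight, reads: u64, writes: u64) -> u64 {
    base.saturating_add(db.read.saturating_mul(reads))
        .saturating_add(db.write.saturating_mul(writes))
}

fn main() {
    // Illustrative RocksDB-style costs; real values come from `RocksDbWeight`
    // or the runtime's `DbWeight` configuration.
    let db = DbWeight { read: 25_000_000, write: 100_000_000 };
    // e.g. `transfer()` below: base 58_903_000 with 4 reads and 3 writes.
    let w = weight(58_903_000, db, 4, 3);
    assert_eq!(w, 58_903_000 + 4 * 25_000_000 + 3 * 100_000_000);
    println!("transfer weight under these assumptions: {}", w);
}
```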
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn create() -> Weight { + (58_077_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn force_create() -> Weight { + (30_497_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn destroy(z: u32, ) -> Weight { + (0 as Weight) + .saturating_add((1_153_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(z as Weight))) + } + fn force_destroy(z: u32, ) -> Weight { + (0 as Weight) + .saturating_add((1_153_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(z as Weight))) + } + fn mint() -> Weight { + (45_600_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn burn() -> Weight { + (40_143_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn transfer() -> Weight { + (58_903_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn force_transfer() -> Weight { + (59_025_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn freeze() -> Weight { + (43_308_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn thaw() -> Weight { + (43_383_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn transfer_ownership() -> Weight { + (31_380_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn set_team() -> Weight { + (32_049_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn set_max_zombies() -> Weight { + (57_745_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn create() -> Weight { + (58_077_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn force_create() -> Weight { + (30_497_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn destroy(z: u32, ) -> Weight { + (0 as Weight) + .saturating_add((1_153_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(z as Weight))) + } + fn force_destroy(z: u32, ) -> Weight { + (0 as Weight) + .saturating_add((1_153_000 as Weight).saturating_mul(z as Weight)) + 
.saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(z as Weight))) + } + fn mint() -> Weight { + (45_600_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn burn() -> Weight { + (40_143_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn transfer() -> Weight { + (58_903_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn force_transfer() -> Weight { + (59_025_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn freeze() -> Weight { + (43_308_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn thaw() -> Weight { + (43_383_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn transfer_ownership() -> Weight { + (31_380_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn set_team() -> Weight { + (32_049_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn set_max_zombies() -> Weight { + (57_745_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } +} diff --git a/frame/atomic-swap/README.md b/frame/atomic-swap/README.md index 1287e90bc0da54c93dd6e3056d986c4c00a04ea2..5dd502095d792ddbc8b4f866791ff7b98bff1ac9 100644 --- a/frame/atomic-swap/README.md +++ b/frame/atomic-swap/README.md @@ -20,4 +20,4 @@ claimed within a specified duration of time, the sender may cancel it. * `claim_swap` - called by the target to approve a swap * `cancel_swap` - may be called by a sender after a specified duration -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs index 31f0c0f42652519a4cda25ac5cec1a21279204d8..ac9b82b0df06798daefeb64f7d066e262bc555dc 100644 --- a/frame/atomic-swap/src/lib.rs +++ b/frame/atomic-swap/src/lib.rs @@ -19,7 +19,7 @@ //! //! A module for atomically sending funds. //! -//! - [`atomic_swap::Trait`](./trait.Trait.html) +//! - [`atomic_swap::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! @@ -56,7 +56,7 @@ use sp_runtime::RuntimeDebug; /// Pending atomic swap operation. #[derive(Clone, Eq, PartialEq, RuntimeDebug, Encode, Decode)] -pub struct PendingSwap { +pub struct PendingSwap { /// Source of the swap. pub source: T::AccountId, /// Action of this swap. @@ -74,7 +74,7 @@ pub type HashedProof = [u8; 32]; /// succeeds with best efforts. /// - **Claim**: claim any resources reserved in the first phrase. /// - **Cancel**: cancel any resources reserved in the first phrase. -pub trait SwapAction { +pub trait SwapAction { /// Reserve the resources needed for the swap, from the given `source`. The reservation is /// allowed to fail. If that is the case, the the full swap creation operation is cancelled. 
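The three phases described above (reserve at swap creation, claim by the counterparty, cancel after expiry) can be modelled with a toy in-memory ledger; the functions below are deliberately simpler than the pallet's `SwapAction` trait and their names are illustrative only:

```rust
use std::collections::HashMap;

/// Toy ledger: account id -> (free, reserved) balance.
type Ledger = HashMap<u32, (u64, u64)>;

/// Reserve phase: move `amount` from the source's free balance into reserve.
fn reserve(ledger: &mut Ledger, source: u32, amount: u64) -> Result<(), &'static str> {
    let entry = ledger.get_mut(&source).ok_or("unknown account")?;
    if entry.0 < amount {
        return Err("insufficient free balance");
    }
    entry.0 -= amount;
    entry.1 += amount;
    Ok(())
}

/// Claim phase: release the reservation and pay the target.
fn claim(ledger: &mut Ledger, source: u32, target: u32, amount: u64) {
    ledger.get_mut(&source).expect("reserved earlier").1 -= amount;
    ledger.entry(target).or_insert((0, 0)).0 += amount;
}

/// Cancel phase: release the reservation back to the source.
fn cancel(ledger: &mut Ledger, source: u32, amount: u64) {
    let entry = ledger.get_mut(&source).expect("reserved earlier");
    entry.1 -= amount;
    entry.0 += amount;
}

fn main() {
    let mut ledger: Ledger = HashMap::new();
    ledger.insert(1, (100, 0));
    reserve(&mut ledger, 1, 40).unwrap();
    assert_eq!(ledger[&1], (60, 40));
    claim(&mut ledger, 1, 2, 40);
    assert_eq!((ledger[&1], ledger[&2]), ((60, 0), (40, 0)));
    // A swap that is never claimed can be cancelled after the duration expires.
    reserve(&mut ledger, 1, 10).unwrap();
    cancel(&mut ledger, 1, 10);
    assert_eq!(ledger[&1], (60, 0));
}
```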
fn reserve(&self, source: &AccountId) -> DispatchResult; @@ -115,7 +115,7 @@ impl DerefMut for BalanceSwapAction where C: Reserva } } -impl SwapAction for BalanceSwapAction +impl SwapAction for BalanceSwapAction where C: ReservableCurrency { fn reserve(&self, source: &AccountId) -> DispatchResult { @@ -136,9 +136,9 @@ impl SwapAction for BalanceSwapAction> + Into<::Event>; + type Event: From> + Into<::Event>; /// Swap action. type SwapAction: SwapAction + Parameter; /// Limit of proof size. @@ -155,7 +155,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as AtomicSwap { + trait Store for Module as AtomicSwap { pub PendingSwaps: double_map hasher(twox_64_concat) T::AccountId, hasher(blake2_128_concat) HashedProof => Option>; @@ -163,7 +163,7 @@ decl_storage! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Swap already exists. AlreadyExist, /// Swap proof is invalid. @@ -186,7 +186,7 @@ decl_error! { decl_event!( /// Event of atomic swap pallet. pub enum Event where - AccountId = ::AccountId, + AccountId = ::AccountId, PendingSwap = PendingSwap, { /// Swap created. \[account, proof, swap\] @@ -201,7 +201,7 @@ decl_event!( decl_module! { /// Module definition of atomic swap pallet. - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index 060411c8815da66996a57661233777cc73efa75a..47b5102bc568cdc6617684740b2e0abcef10f00b 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -2,12 +2,9 @@ use super::*; -use frame_support::{ - impl_outer_origin, parameter_types, weights::Weight, -}; +use frame_support::{impl_outer_origin, parameter_types}; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; @@ -20,12 +17,14 @@ impl_outer_origin! { pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -37,13 +36,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -54,7 +46,7 @@ impl frame_system::Trait for Test { parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type DustRemoval = (); @@ -67,7 +59,7 @@ parameter_types! 
{ pub const ProofLimit: u32 = 1024; pub const ExpireDuration: u64 = 100; } -impl Trait for Test { +impl Config for Test { type Event = (); type SwapAction = BalanceSwapAction; type ProofLimit = ProofLimit; diff --git a/frame/aura/README.md b/frame/aura/README.md index 4f3eacbad8a061114309d642c7346acc829393a8..73ed986dd734dd860bb17f4d7ba4ff35c93eb776 100644 --- a/frame/aura/README.md +++ b/frame/aura/README.md @@ -25,4 +25,4 @@ If you're interested in hacking on this module, it is useful to understand the i [`ProvideInherent`](https://docs.rs/sp-inherents/latest/sp_inherents/trait.ProvideInherent.html) and [`ProvideInherentData`](https://docs.rs/sp-inherents/latest/sp_inherents/trait.ProvideInherentData.html) to create and check inherents. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/aura/src/lib.rs b/frame/aura/src/lib.rs index e8e0e616bc0dc632f851c94246f24936973f2f32..34f216850c675163b3db0c7365d281ddb0d67485 100644 --- a/frame/aura/src/lib.rs +++ b/frame/aura/src/lib.rs @@ -17,7 +17,7 @@ //! # Aura Module //! -//! - [`aura::Trait`](./trait.Trait.html) +//! - [`aura::Config`](./trait.Config.html) //! - [`Module`](./struct.Module.html) //! //! ## Overview @@ -66,13 +66,13 @@ use sp_consensus_aura::{ mod mock; mod tests; -pub trait Trait: pallet_timestamp::Trait { +pub trait Config: pallet_timestamp::Config { /// The identifier type for an authority. type AuthorityId: Member + Parameter + RuntimeAppPublic + Default; } decl_storage! { - trait Store for Module as Aura { + trait Store for Module as Aura { /// The last timestamp. LastTimestamp get(fn last): T::Moment; @@ -86,10 +86,10 @@ decl_storage! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { } + pub struct Module for enum Call where origin: T::Origin { } } -impl Module { +impl Module { fn change_authorities(new: Vec) { >::put(&new); @@ -108,11 +108,11 @@ impl Module { } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Module { type Public = T::AuthorityId; } -impl pallet_session::OneSessionHandler for Module { +impl pallet_session::OneSessionHandler for Module { type Key = T::AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) @@ -145,7 +145,7 @@ impl pallet_session::OneSessionHandler for Module { } } -impl FindAuthor for Module { +impl FindAuthor for Module { fn find_author<'a, I>(digests: I) -> Option where I: 'a + IntoIterator { @@ -167,7 +167,7 @@ impl FindAuthor for Module { #[doc(hidden)] pub struct FindAccountFromAuthorIndex(sp_std::marker::PhantomData<(T, Inner)>); -impl> FindAuthor +impl> FindAuthor for FindAccountFromAuthorIndex { fn find_author<'a, I>(digests: I) -> Option @@ -183,7 +183,7 @@ impl> FindAuthor /// Find the authority ID of the Aura authority who authored the current block. pub type AuraAuthorId = FindAccountFromAuthorIndex>; -impl IsMember for Module { +impl IsMember for Module { fn is_member(authority_id: &T::AuthorityId) -> bool { Self::authorities() .iter() @@ -191,12 +191,12 @@ impl IsMember for Module { } } -impl Module { +impl Module { /// Determine the Aura slot-duration based on the Timestamp module configuration. pub fn slot_duration() -> T::Moment { // we double the minimum block-period so each author can always propose within // the majority of its slot. 
- ::MinimumPeriod::get().saturating_mul(2u32.into()) + ::MinimumPeriod::get().saturating_mul(2u32.into()) } fn on_timestamp_set(now: T::Moment, slot_duration: T::Moment) { @@ -218,13 +218,13 @@ impl Module { } } -impl OnTimestampSet for Module { +impl OnTimestampSet for Module { fn on_timestamp_set(moment: T::Moment) { Self::on_timestamp_set(moment, Self::slot_duration()) } } -impl ProvideInherent for Module { +impl ProvideInherent for Module { type Call = pallet_timestamp::Call; type Error = MakeFatalError; const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index a3875727e47c20112d728930b3a1618293284783..1fcb1c2340d13c68b0badde2f424e2d8ef168149 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -19,13 +19,13 @@ #![cfg(test)] -use crate::{Trait, Module, GenesisConfig}; +use crate::{Config, Module, GenesisConfig}; use sp_consensus_aura::ed25519::AuthorityId; use sp_runtime::{ - traits::IdentityLookup, Perbill, + traits::IdentityLookup, testing::{Header, UintAuthorityId}, }; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; +use frame_support::{impl_outer_origin, parameter_types}; use sp_io; use sp_core::H256; @@ -39,14 +39,16 @@ pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); pub const MinimumPeriod: u64 = 1; } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -58,13 +60,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); type PalletInfo = (); type AccountData = (); @@ -73,14 +68,14 @@ impl frame_system::Trait for Test { type SystemWeightInfo = (); } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = Aura; type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } -impl Trait for Test { +impl Config for Test { type AuthorityId = AuthorityId; } diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index 09be533474fca225c0055747f1713d62274c85a5..2d275e01bba24d2c26274fd63d31cc909af4da81 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -28,10 +28,10 @@ use frame_support::{decl_module, decl_storage}; use sp_authority_discovery::AuthorityId; /// The module's config trait. -pub trait Trait: frame_system::Trait + pallet_session::Trait {} +pub trait Config: frame_system::Config + pallet_session::Config {} decl_storage! { - trait Store for Module as AuthorityDiscovery { + trait Store for Module as AuthorityDiscovery { /// Keys of the current and next authority set. Keys get(fn keys): Vec; } @@ -42,11 +42,11 @@ decl_storage! { } decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { } } -impl Module { +impl Module { /// Retrieve authority identifiers of the current and next authority set. pub fn authorities() -> Vec { Keys::get() @@ -60,11 +60,11 @@ impl Module { } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Module { type Public = AuthorityId; } -impl pallet_session::OneSessionHandler for Module { +impl pallet_session::OneSessionHandler for Module { type Key = AuthorityId; fn on_genesis_session<'a, I: 'a>(authorities: I) @@ -93,7 +93,7 @@ impl pallet_session::OneSessionHandler for Module { #[cfg(test)] mod tests { use super::*; - use sp_authority_discovery::{AuthorityPair}; + use sp_authority_discovery::AuthorityPair; use sp_application_crypto::Pair; use sp_core::{crypto::key_types, H256}; use sp_io::TestExternalities; @@ -101,19 +101,19 @@ mod tests { testing::{Header, UintAuthorityId}, traits::{ConvertInto, IdentityLookup, OpaqueKeys}, Perbill, KeyTypeId, }; - use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; + use frame_support::{impl_outer_origin, parameter_types}; type AuthorityDiscovery = Module; #[derive(Clone, Eq, PartialEq)] pub struct Test; - impl Trait for Test {} + impl Config for Test {} parameter_types! { pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); } - impl pallet_session::Trait for Test { + impl pallet_session::Config for Test { type SessionManager = (); type Keys = UintAuthorityId; type ShouldEndSession = pallet_session::PeriodicSessions; @@ -126,7 +126,7 @@ mod tests { type WeightInfo = (); } - impl pallet_session::historical::Trait for Test { + impl pallet_session::historical::Config for Test { type FullIdentification = (); type FullIdentificationOf = (); } @@ -138,13 +138,15 @@ mod tests { pub const Offset: BlockNumber = 0; pub const UncleGenerations: u64 = 0; pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = BlockNumber; @@ -156,13 +158,6 @@ mod tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); type PalletInfo = (); type AccountData = (); diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 0a10c8849571b4477daf2d6eea59db7338841c23..b991beaaa2b67a3ac0f64b66ead26a614ce93c7c 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -34,7 +34,7 @@ use sp_authorship::{INHERENT_IDENTIFIER, UnclesInherentData, InherentError}; const MAX_UNCLES: usize = 10; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// Find the author of a block. type FindAuthor: FindAuthor; /// The number of blocks back we should accept uncles. 
@@ -152,7 +152,7 @@ enum UncleEntryItem { } decl_storage! { - trait Store for Module as Authorship { + trait Store for Module as Authorship { /// Uncles Uncles: Vec>; /// Author of current block. @@ -164,7 +164,7 @@ decl_storage! { decl_error! { /// Error for the authorship module. - pub enum Error for Module { + pub enum Error for Module { /// The uncle parent not in the chain. InvalidUncleParent, /// Uncles already set in the block. @@ -183,7 +183,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn on_initialize(now: T::BlockNumber) -> Weight { @@ -223,7 +223,7 @@ decl_module! { } } -impl Module { +impl Module { /// Fetch the author of the block. /// /// This is safe to invoke in `on_initialize` implementations, as well @@ -337,7 +337,7 @@ impl Module { } } -impl ProvideInherent for Module { +impl ProvideInherent for Module { type Call = Call; type Error = InherentError; const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; @@ -399,9 +399,9 @@ mod tests { use super::*; use sp_core::H256; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, testing::Header, generic::DigestItem, Perbill, + traits::{BlakeTwo256, IdentityLookup}, testing::Header, generic::DigestItem, }; - use frame_support::{parameter_types, impl_outer_origin, ConsensusEngineId, weights::Weight}; + use frame_support::{parameter_types, impl_outer_origin, ConsensusEngineId}; impl_outer_origin!{ pub enum Origin for Test where system = frame_system {} @@ -412,13 +412,15 @@ mod tests { parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -430,13 +432,6 @@ mod tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); type PalletInfo = (); type AccountData = (); @@ -449,7 +444,7 @@ mod tests { pub const UncleGenerations: u64 = 5; } - impl Trait for Test { + impl Config for Test { type FindAuthor = AuthorGiven; type UncleGenerations = UncleGenerations; type FilterUncle = SealVerify; diff --git a/frame/babe/src/equivocation.rs b/frame/babe/src/equivocation.rs index 322dff92f2398b330efff33357125da694c85451..55aaedfe082fe61eef176568813022e05d7cb794 100644 --- a/frame/babe/src/equivocation.rs +++ b/frame/babe/src/equivocation.rs @@ -48,14 +48,14 @@ use sp_staking::{ }; use sp_std::prelude::*; -use crate::{Call, Module, Trait}; +use crate::{Call, Module, Config}; /// A trait with utility methods for handling equivocation reports in BABE. 
/// The trait provides methods for reporting an offence triggered by a valid /// equivocation report, checking the current block author (to declare as the /// reporter), and also for creating and submitting equivocation report /// extrinsics (useful only in offchain context). -pub trait HandleEquivocation { +pub trait HandleEquivocation { /// Report an offence proved by the given reporters. fn report_offence( reporters: Vec, @@ -75,7 +75,7 @@ pub trait HandleEquivocation { fn block_author() -> Option; } -impl HandleEquivocation for () { +impl HandleEquivocation for () { fn report_offence( _reporters: Vec, _offence: BabeEquivocationOffence, @@ -120,7 +120,7 @@ where // We use the authorship pallet to fetch the current block author and use // `offchain::SendTransactionTypes` for unsigned extrinsic creation and // submission. - T: Trait + pallet_authorship::Trait + frame_system::offchain::SendTransactionTypes>, + T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, // A system for reporting offences after valid equivocation reports are // processed. R: ReportOffence< @@ -164,7 +164,7 @@ where /// A `ValidateUnsigned` implementation that restricts calls to `report_equivocation_unsigned` /// to local calls (i.e. extrinsics generated on this node) or that already in a block. This /// guarantees that only block authors can include unsigned equivocation reports. -impl frame_support::unsigned::ValidateUnsigned for Module { +impl frame_support::unsigned::ValidateUnsigned for Module { type Call = Call; fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { if let Call::report_equivocation_unsigned(equivocation_proof, _) = call { diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 8cab698fda09386419dbc5f16fc6c1e2bb96e7ad..a61f1244cbebde12de74cf7cdd45eac2b65b9915 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -62,7 +62,7 @@ mod tests; pub use equivocation::{BabeEquivocationOffence, EquivocationHandler, HandleEquivocation}; -pub trait Trait: pallet_timestamp::Trait { +pub trait Config: pallet_timestamp::Config { /// The amount of time, in slots, that each epoch should last. type EpochDuration: Get; @@ -115,7 +115,7 @@ pub trait WeightInfo { pub trait EpochChangeTrigger { /// Trigger an epoch change, if any should take place. This should be called /// during every block, after initialization is done. - fn trigger(now: T::BlockNumber); + fn trigger(now: T::BlockNumber); } /// A type signifying to BABE that an external trigger @@ -123,7 +123,7 @@ pub trait EpochChangeTrigger { pub struct ExternalTrigger; impl EpochChangeTrigger for ExternalTrigger { - fn trigger(_: T::BlockNumber) { } // nothing - trigger is external. + fn trigger(_: T::BlockNumber) { } // nothing - trigger is external. } /// A type signifying to BABE that it should perform epoch changes @@ -131,7 +131,7 @@ impl EpochChangeTrigger for ExternalTrigger { pub struct SameAuthoritiesForever; impl EpochChangeTrigger for SameAuthoritiesForever { - fn trigger(now: T::BlockNumber) { + fn trigger(now: T::BlockNumber) { if >::should_epoch_change(now) { let authorities = >::authorities(); let next_authorities = authorities.clone(); @@ -146,7 +146,7 @@ const UNDER_CONSTRUCTION_SEGMENT_LENGTH: usize = 256; type MaybeRandomness = Option; decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// An equivocation proof provided as part of an equivocation report is invalid. 
InvalidEquivocationProof, /// A key ownership proof provided as part of an equivocation report is invalid. @@ -157,7 +157,7 @@ decl_error! { } decl_storage! { - trait Store for Module as Babe { + trait Store for Module as Babe { /// Current epoch index. pub EpochIndex get(fn epoch_index): u64; @@ -230,7 +230,7 @@ decl_storage! { decl_module! { /// The BABE Pallet - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// The number of **slots** that an epoch takes. We couple sessions to /// epochs, i.e. we start a new session once the new epoch begins. const EpochDuration: u64 = T::EpochDuration::get(); @@ -271,7 +271,7 @@ decl_module! { /// the equivocation proof and validate the given key ownership proof /// against the extracted offender. If both are valid, the offence will /// be reported. - #[weight = ::WeightInfo::report_equivocation(key_owner_proof.validator_count())] + #[weight = ::WeightInfo::report_equivocation(key_owner_proof.validator_count())] fn report_equivocation( origin, equivocation_proof: EquivocationProof, @@ -294,7 +294,7 @@ decl_module! { /// block authors will call it (validated in `ValidateUnsigned`), as such /// if the block author is defined it will be defined as the equivocation /// reporter. - #[weight = ::WeightInfo::report_equivocation(key_owner_proof.validator_count())] + #[weight = ::WeightInfo::report_equivocation(key_owner_proof.validator_count())] fn report_equivocation_unsigned( origin, equivocation_proof: EquivocationProof, @@ -311,7 +311,7 @@ decl_module! { } } -impl RandomnessT<::Hash> for Module { +impl RandomnessT<::Hash> for Module { /// Some BABE blocks have VRF outputs where the block producer has exactly one bit of influence, /// either they make the block or they do not make the block and thus someone else makes the /// next block. Yet, this randomness is not fresh in all BABE blocks. @@ -332,14 +332,14 @@ impl RandomnessT<::Hash> for Module { subject.reserve(VRF_OUTPUT_LENGTH); subject.extend_from_slice(&Self::randomness()[..]); - ::Hashing::hash(&subject[..]) + ::Hashing::hash(&subject[..]) } } /// A BABE public key pub type BabeKey = [u8; PUBLIC_KEY_LENGTH]; -impl FindAuthor for Module { +impl FindAuthor for Module { fn find_author<'a, I>(digests: I) -> Option where I: 'a + IntoIterator { @@ -354,7 +354,7 @@ impl FindAuthor for Module { } } -impl IsMember for Module { +impl IsMember for Module { fn is_member(authority_id: &AuthorityId) -> bool { >::authorities() .iter() @@ -362,7 +362,7 @@ impl IsMember for Module { } } -impl pallet_session::ShouldEndSession for Module { +impl pallet_session::ShouldEndSession for Module { fn should_end_session(now: T::BlockNumber) -> bool { // it might be (and it is in current implementation) that session module is calling // should_end_session() from it's own on_initialize() handler @@ -374,12 +374,12 @@ impl pallet_session::ShouldEndSession for Module { } } -impl Module { +impl Module { /// Determine the BABE slot duration based on the Timestamp module configuration. pub fn slot_duration() -> T::Moment { // we double the minimum block-period so each author can always propose within // the majority of their slot. - ::MinimumPeriod::get().saturating_mul(2u32.into()) + ::MinimumPeriod::get().saturating_mul(2u32.into()) } /// Determine whether an epoch change should take place at this block. 
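The `Randomness` implementation above derives per-consumer values by appending the epoch randomness to the caller's `subject` bytes and hashing the result, so different subjects obtain uncorrelated outputs from the same underlying value. A standalone sketch of that structure; the hasher here is a plain standard-library stand-in, not the chain's `Hashing` type:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;

/// Stand-in for the runtime's hash function. The structure, not the hash
/// itself, is the point: output = H(subject ++ epoch_randomness).
fn derive_randomness(subject: &[u8], epoch_randomness: &[u8; 32]) -> u64 {
    let mut preimage = Vec::with_capacity(subject.len() + epoch_randomness.len());
    preimage.extend_from_slice(subject);
    preimage.extend_from_slice(epoch_randomness);
    let mut hasher = DefaultHasher::new();
    hasher.write(&preimage);
    hasher.finish()
}

fn main() {
    let epoch_randomness = [7u8; 32];
    // Different consumers pass different subjects and get independent values
    // from the same per-epoch randomness.
    let lottery = derive_randomness(b"lottery", &epoch_randomness);
    let tickets = derive_randomness(b"tickets", &epoch_randomness);
    println!("lottery={lottery:x} tickets={tickets:x}");
}
```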
@@ -690,11 +690,11 @@ impl Module { } } -impl OnTimestampSet for Module { +impl OnTimestampSet for Module { fn on_timestamp_set(_moment: T::Moment) { } } -impl frame_support::traits::EstimateNextSessionRotation for Module { +impl frame_support::traits::EstimateNextSessionRotation for Module { fn estimate_next_session_rotation(now: T::BlockNumber) -> Option { Self::next_expected_epoch_change(now) } @@ -706,17 +706,17 @@ impl frame_support::traits::EstimateNextSessionRotation frame_support::traits::Lateness for Module { +impl frame_support::traits::Lateness for Module { fn lateness(&self) -> T::BlockNumber { Self::lateness() } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Module { type Public = AuthorityId; } -impl pallet_session::OneSessionHandler for Module { +impl pallet_session::OneSessionHandler for Module { type Key = AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) @@ -766,7 +766,7 @@ fn compute_randomness( sp_io::hashing::blake2_256(&s) } -impl ProvideInherent for Module { +impl ProvideInherent for Module { type Call = pallet_timestamp::Call; type Error = MakeFatalError; const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 9f00a4ddfc3cd0f49d2e419c4e419baffb6d0245..8af92c79e91f4e363e60c439c65e8a1fd0840096 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -18,7 +18,7 @@ //! Test utilities use codec::Encode; -use super::{Trait, Module, CurrentSlot}; +use super::{Config, Module, CurrentSlot}; use sp_runtime::{ Perbill, impl_opaque_keys, curve::PiecewiseLinear, @@ -57,16 +57,18 @@ pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); pub const EpochDuration: u64 = 3; pub const ExpectedBlockTime: u64 = 1; pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(16); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -79,13 +81,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); @@ -107,9 +102,9 @@ impl_opaque_keys! 
{ } } -impl pallet_session::Trait for Test { +impl pallet_session::Config for Test { type Event = (); - type ValidatorId = ::AccountId; + type ValidatorId = ::AccountId; type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; type NextSessionRotation = Babe; @@ -120,7 +115,7 @@ impl pallet_session::Trait for Test { type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -129,7 +124,7 @@ parameter_types! { pub const UncleGenerations: u64 = 0; } -impl pallet_authorship::Trait for Test { +impl pallet_authorship::Config for Test { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -140,7 +135,7 @@ parameter_types! { pub const MinimumPeriod: u64 = 1; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; @@ -151,7 +146,7 @@ parameter_types! { pub const ExistentialDeposit: u128 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u128; type DustRemoval = (); @@ -183,7 +178,7 @@ parameter_types! { pub const StakingUnsignedPriority: u64 = u64::max_value() / 2; } -impl pallet_staking::Trait for Test { +impl pallet_staking::Config for Test { type RewardRemainder = (); type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type Event = (); @@ -209,17 +204,18 @@ impl pallet_staking::Trait for Test { } parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) + * BlockWeights::get().max_block; } -impl pallet_offences::Trait for Test { +impl pallet_offences::Config for Test { type Event = (); type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; } -impl Trait for Test { +impl Config for Test { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; type EpochChangeTrigger = crate::ExternalTrigger; diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 06bf84614ca6df58c8603dbd2a5bf8e55083faf1..29b080493f46b12784ed6a38049ae467dc31c883 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -206,7 +206,7 @@ fn authority_index() { #[test] fn can_predict_next_epoch_change() { new_test_ext(1).execute_with(|| { - assert_eq!(::EpochDuration::get(), 3); + assert_eq!(::EpochDuration::get(), 3); // this sets the genesis slot to 6; go_to_block(1, 6); assert_eq!(Babe::genesis_slot(), 6); @@ -227,7 +227,7 @@ fn can_predict_next_epoch_change() { #[test] fn can_enact_next_config() { new_test_ext(1).execute_with(|| { - assert_eq!(::EpochDuration::get(), 3); + assert_eq!(::EpochDuration::get(), 3); // this sets the genesis slot to 6; go_to_block(1, 6); assert_eq!(Babe::genesis_slot(), 6); @@ -661,7 +661,7 @@ fn report_equivocation_has_valid_weight() { // but there's a lower bound of 100 validators. assert!( (1..=100) - .map(::WeightInfo::report_equivocation) + .map(::WeightInfo::report_equivocation) .collect::>() .windows(2) .all(|w| w[0] == w[1]) @@ -671,7 +671,7 @@ fn report_equivocation_has_valid_weight() { // with every extra validator. 
assert!( (100..=1000) - .map(::WeightInfo::report_equivocation) + .map(::WeightInfo::report_equivocation) .collect::>() .windows(2) .all(|w| w[0] < w[1]) diff --git a/frame/balances/README.md b/frame/balances/README.md index a93ed5f306e0fad6f7b074164624e256acf343ec..cbbfea75e6848a1a25af6b58da00270de16677ca 100644 --- a/frame/balances/README.md +++ b/frame/balances/README.md @@ -83,8 +83,8 @@ The Contract module uses the `Currency` trait to handle gas payment, and its typ ```rust use frame_support::traits::Currency; -pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -pub type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; ``` @@ -93,11 +93,11 @@ The Staking module uses the `LockableCurrency` trait to lock a stash account's f ```rust use frame_support::traits::{WithdrawReasons, LockableCurrency}; use sp_runtime::traits::Bounded; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { type Currency: LockableCurrency; } -fn update_ledger( +fn update_ledger( controller: &T::AccountId, ledger: &StakingLedger ) { @@ -117,6 +117,6 @@ The Balances module depends on the [`GenesisConfig`](https://docs.rs/pallet-bala ## Assumptions -* Total issued balanced of all accounts should be less than `Trait::Balance::max_value()`. +* Total issued balanced of all accounts should be less than `Config::Balance::max_value()`. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 141a360f7e180a34013a205f7e31411ec2fc9299..b7d2488bfdd09202f47ade4d1f2bfe148e7a9d15 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -19,7 +19,7 @@ //! //! The Balances module provides functionality for handling accounts and balances. //! -//! - [`balances::Trait`](./trait.Trait.html) +//! - [`balances::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! @@ -99,12 +99,12 @@ //! //! ``` //! use frame_support::traits::Currency; -//! # pub trait Trait: frame_system::Trait { +//! # pub trait Config: frame_system::Config { //! # type Currency: Currency; //! # } //! -//! pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -//! pub type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +//! pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +//! pub type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; //! //! # fn main() {} //! ``` @@ -114,17 +114,17 @@ //! ``` //! use frame_support::traits::{WithdrawReasons, LockableCurrency}; //! use sp_runtime::traits::Bounded; -//! pub trait Trait: frame_system::Trait { +//! pub trait Config: frame_system::Config { //! type Currency: LockableCurrency; //! } -//! # struct StakingLedger { -//! # stash: ::AccountId, -//! # total: <::Currency as frame_support::traits::Currency<::AccountId>>::Balance, +//! # struct StakingLedger { +//! # stash: ::AccountId, +//! # total: <::Currency as frame_support::traits::Currency<::AccountId>>::Balance, //! # phantom: std::marker::PhantomData, //! # } //! # const STAKING_ID: [u8; 8] = *b"staking "; //! -//! fn update_ledger( +//! fn update_ledger( //! controller: &T::AccountId, //! ledger: &StakingLedger //! ) { @@ -145,7 +145,7 @@ //! //! ## Assumptions //! -//! 
* Total issued balanced of all accounts should be less than `Trait::Balance::max_value()`. +//! * Total issued balanced of all accounts should be less than `Config::Balance::max_value()`. #![cfg_attr(not(feature = "std"), no_std)] @@ -179,7 +179,7 @@ use frame_system::{self as system, ensure_signed, ensure_root}; pub use self::imbalances::{PositiveImbalance, NegativeImbalance}; pub use weights::WeightInfo; -pub trait Subtrait: frame_system::Trait { +pub trait Subtrait: frame_system::Config { /// The balance of an account. type Balance: Parameter + Member + AtLeast32BitUnsigned + Codec + Default + Copy + MaybeSerializeDeserialize + Debug; @@ -198,7 +198,7 @@ pub trait Subtrait: frame_system::Trait { type MaxLocks: Get; } -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The balance of an account. type Balance: Parameter + Member + AtLeast32BitUnsigned + Codec + Default + Copy + MaybeSerializeDeserialize + Debug; @@ -207,7 +207,7 @@ pub trait Trait: frame_system::Trait { type DustRemoval: OnUnbalanced>; /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The minimum amount required to keep an account open. type ExistentialDeposit: Get; @@ -223,18 +223,18 @@ pub trait Trait: frame_system::Trait { type MaxLocks: Get; } -impl, I: Instance> Subtrait for T { +impl, I: Instance> Subtrait for T { type Balance = T::Balance; type ExistentialDeposit = T::ExistentialDeposit; type AccountStore = T::AccountStore; - type WeightInfo = >::WeightInfo; + type WeightInfo = >::WeightInfo; type MaxLocks = T::MaxLocks; } decl_event!( pub enum Event where - ::AccountId, - >::Balance + ::AccountId, + >::Balance { /// An account was created with some free balance. \[account, free_balance\] Endowed(AccountId, Balance), @@ -259,7 +259,7 @@ decl_event!( ); decl_error! { - pub enum Error for Module, I: Instance> { + pub enum Error for Module, I: Instance> { /// Vesting balance too high to send value VestingBalance, /// Account liquidity restrictions prevent withdrawal @@ -382,7 +382,7 @@ impl Default for Releases { } decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Balances { + trait Store for Module, I: Instance=DefaultInstance> as Balances { /// The total units issued in the system. pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig| { config.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n) @@ -408,10 +408,16 @@ decl_storage! { build(|config: &GenesisConfig| { for (_, balance) in &config.balances { assert!( - *balance >= >::ExistentialDeposit::get(), - "the balance of any account should always be more than existential deposit.", + *balance >= >::ExistentialDeposit::get(), + "the balance of any account should always be at least the existential deposit.", ) } + + // ensure no duplicates exist. + let endowed_accounts = config.balances.iter().map(|(x, _)| x).cloned().collect::>(); + + assert!(endowed_accounts.len() == config.balances.len(), "duplicate balances in genesis."); + for &(ref who, free) in config.balances.iter() { T::AccountStore::insert(who, AccountData { free, .. Default::default() }); } @@ -420,7 +426,7 @@ decl_storage! { } decl_module! { - pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { + pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { type Error = Error; /// The minimum amount required to keep an account open. @@ -565,7 +571,7 @@ decl_module! 
{ } } -impl, I: Instance> Module { +impl, I: Instance> Module { // PRIVATE MUTABLES /// Get the free balance of an account. @@ -704,7 +710,7 @@ impl, I: Instance> Module { // of the inner member. mod imbalances { use super::{ - result, DefaultInstance, Imbalance, Trait, Zero, Instance, Saturating, + result, DefaultInstance, Imbalance, Config, Zero, Instance, Saturating, StorageValue, TryDrop, }; use sp_std::mem; @@ -712,9 +718,9 @@ mod imbalances { /// Opaque, move-only struct with private fields that serves as a token denoting that /// funds have been created without any equal and opposite accounting. #[must_use] - pub struct PositiveImbalance, I: Instance=DefaultInstance>(T::Balance); + pub struct PositiveImbalance, I: Instance=DefaultInstance>(T::Balance); - impl, I: Instance> PositiveImbalance { + impl, I: Instance> PositiveImbalance { /// Create a new positive imbalance from a balance. pub fn new(amount: T::Balance) -> Self { PositiveImbalance(amount) @@ -724,22 +730,22 @@ mod imbalances { /// Opaque, move-only struct with private fields that serves as a token denoting that /// funds have been destroyed without any equal and opposite accounting. #[must_use] - pub struct NegativeImbalance, I: Instance=DefaultInstance>(T::Balance); + pub struct NegativeImbalance, I: Instance=DefaultInstance>(T::Balance); - impl, I: Instance> NegativeImbalance { + impl, I: Instance> NegativeImbalance { /// Create a new negative imbalance from a balance. pub fn new(amount: T::Balance) -> Self { NegativeImbalance(amount) } } - impl, I: Instance> TryDrop for PositiveImbalance { + impl, I: Instance> TryDrop for PositiveImbalance { fn try_drop(self) -> result::Result<(), Self> { self.drop_zero() } } - impl, I: Instance> Imbalance for PositiveImbalance { + impl, I: Instance> Imbalance for PositiveImbalance { type Opposite = NegativeImbalance; fn zero() -> Self { @@ -784,13 +790,13 @@ mod imbalances { } } - impl, I: Instance> TryDrop for NegativeImbalance { + impl, I: Instance> TryDrop for NegativeImbalance { fn try_drop(self) -> result::Result<(), Self> { self.drop_zero() } } - impl, I: Instance> Imbalance for NegativeImbalance { + impl, I: Instance> Imbalance for NegativeImbalance { type Opposite = PositiveImbalance; fn zero() -> Self { @@ -835,7 +841,7 @@ mod imbalances { } } - impl, I: Instance> Drop for PositiveImbalance { + impl, I: Instance> Drop for PositiveImbalance { /// Basic drop handler will just square up the total issuance. fn drop(&mut self) { >::mutate( @@ -844,7 +850,7 @@ mod imbalances { } } - impl, I: Instance> Drop for NegativeImbalance { + impl, I: Instance> Drop for NegativeImbalance { /// Basic drop handler will just square up the total issuance. fn drop(&mut self) { >::mutate( @@ -854,7 +860,7 @@ mod imbalances { } } -impl, I: Instance> Currency for Module where +impl, I: Instance> Currency for Module where T::Balance: MaybeSerializeDeserialize + Debug { type Balance = T::Balance; @@ -1103,7 +1109,7 @@ impl, I: Instance> Currency for Module where } } -impl, I: Instance> ReservableCurrency for Module where +impl, I: Instance> ReservableCurrency for Module where T::Balance: MaybeSerializeDeserialize + Debug { /// Check if `who` can reserve `value` from their free balance. @@ -1218,7 +1224,7 @@ impl, I: Instance> ReservableCurrency for Module /// NOTE: You probably won't need to use this! This only needs to be "wired in" to System module /// if you're using the local balance storage. 
**If you're using the composite system account /// storage (which is the default in most examples and tests) then there's no need.** -impl, I: Instance> OnKilledAccount for Module { +impl, I: Instance> OnKilledAccount for Module { fn on_killed_account(who: &T::AccountId) { Account::::mutate_exists(who, |account| { let total = account.as_ref().map(|acc| acc.total()).unwrap_or_default(); @@ -1231,7 +1237,7 @@ impl, I: Instance> OnKilledAccount for Module { } } -impl, I: Instance> LockableCurrency for Module +impl, I: Instance> LockableCurrency for Module where T::Balance: MaybeSerializeDeserialize + Debug { @@ -1296,7 +1302,7 @@ where } } -impl, I: Instance> IsDeadAccount for Module where +impl, I: Instance> IsDeadAccount for Module where T::Balance: MaybeSerializeDeserialize + Debug { fn is_dead_account(who: &T::AccountId) -> bool { diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index b8cf90dad9222f2efc2c5ea028e2718a16f5c11e..f47776e0ee6c4555d8e9e790dd4cbd7fa3f2ec59 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -23,7 +23,7 @@ pub struct CallWithDispatchInfo; impl sp_runtime::traits::Dispatchable for CallWithDispatchInfo { type Origin = (); - type Trait = (); + type Config = (); type Info = frame_support::weights::DispatchInfo; type PostInfo = frame_support::weights::PostDispatchInfo; @@ -55,7 +55,7 @@ macro_rules! decl_tests { pub type System = frame_system::Module<$test>; pub type Balances = Module<$test>; - pub const CALL: &<$test as frame_system::Trait>::Call = &$crate::tests::CallWithDispatchInfo; + pub const CALL: &<$test as frame_system::Config>::Call = &$crate::tests::CallWithDispatchInfo; /// create a transaction info struct from weight. Handy to avoid building the whole struct. pub fn info_from_weight(w: Weight) -> DispatchInfo { @@ -91,7 +91,7 @@ macro_rules! decl_tests { <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { assert_eq!(Balances::free_balance(1), 10); assert_ok!(>::transfer(&1, &2, 10, AllowDeath)); - assert!(!<::AccountStore as StoredMap>>::is_explicit(&1)); + assert!(!<::AccountStore as StoredMap>>::is_explicit(&1)); }); } @@ -630,7 +630,7 @@ macro_rules! decl_tests { } #[test] - #[should_panic = "the balance of any account should always be more than existential deposit."] + #[should_panic = "the balance of any account should always be at least the existential deposit."] fn cannot_set_genesis_value_below_ed() { ($existential_deposit).with(|v| *v.borrow_mut() = 11); let mut t = frame_system::GenesisConfig::default().build_storage::<$test>().unwrap(); @@ -639,6 +639,15 @@ macro_rules! 
decl_tests { }.assimilate_storage(&mut t).unwrap(); } + #[test] + #[should_panic = "duplicate balances in genesis."] + fn cannot_set_genesis_value_twice() { + let mut t = frame_system::GenesisConfig::default().build_storage::<$test>().unwrap(); + let _ = GenesisConfig::<$test> { + balances: vec![(1, 10), (2, 20), (1, 15)], + }.assimilate_storage(&mut t).unwrap(); + } + #[test] fn dust_moves_between_free_and_reserved() { <$ext_builder>::default() diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index fd4ba1fd3c3059ae430f0f60af990ce5e2e35ef3..81c2b895273b55d27012a44e3629f950671bf30e 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -20,7 +20,6 @@ #![cfg(test)] use sp_runtime::{ - Perbill, traits::IdentityLookup, testing::Header, }; @@ -29,7 +28,7 @@ use sp_io; use frame_support::{impl_outer_origin, impl_outer_event, parameter_types}; use frame_support::weights::{Weight, DispatchInfo, IdentityFee}; use pallet_transaction_payment::CurrencyAdapter; -use crate::{GenesisConfig, Module, Trait, decl_tests, tests::CallWithDispatchInfo}; +use crate::{GenesisConfig, Module, Config, decl_tests, tests::CallWithDispatchInfo}; use frame_system as system; impl_outer_origin!{ @@ -52,13 +51,15 @@ impl_outer_event! { pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); pub static ExistentialDeposit: u64 = 0; } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -70,13 +71,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = super::AccountData; @@ -87,14 +81,14 @@ impl frame_system::Trait for Test { parameter_types! 
{ pub const TransactionByteFee: u64 = 1; } -impl pallet_transaction_payment::Trait for Test { +impl pallet_transaction_payment::Config for Test { type OnChargeTransaction = CurrencyAdapter, ()>; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } -impl Trait for Test { +impl Config for Test { type Balance = u64; type DustRemoval = (); type Event = Event; diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index c0a5d23ff1a410068ed8282bf07cffe3b6fbb4a6..c168e1d8e59e17c6db3006049c3799f1adf40f89 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -20,7 +20,6 @@ #![cfg(test)] use sp_runtime::{ - Perbill, traits::IdentityLookup, testing::Header, }; @@ -29,7 +28,7 @@ use sp_io; use frame_support::{impl_outer_origin, impl_outer_event, parameter_types}; use frame_support::traits::StorageMapShim; use frame_support::weights::{Weight, DispatchInfo, IdentityFee}; -use crate::{GenesisConfig, Module, Trait, decl_tests, tests::CallWithDispatchInfo}; +use crate::{GenesisConfig, Module, Config, decl_tests, tests::CallWithDispatchInfo}; use pallet_transaction_payment::CurrencyAdapter; use frame_system as system; @@ -53,13 +52,15 @@ impl_outer_event! { pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); pub static ExistentialDeposit: u64 = 0; } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -71,13 +72,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = super::AccountData; @@ -88,7 +82,7 @@ impl frame_system::Trait for Test { parameter_types! { pub const TransactionByteFee: u64 = 1; } -impl pallet_transaction_payment::Trait for Test { +impl pallet_transaction_payment::Config for Test { type OnChargeTransaction = CurrencyAdapter, ()>; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; @@ -97,7 +91,7 @@ impl pallet_transaction_payment::Trait for Test { parameter_types! { pub const MaxLocks: u32 = 50; } -impl Trait for Test { +impl Config for Test { type Balance = u64; type DustRemoval = (); type Event = Event; diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs index 45e4195f962d1f99494b5552d66141a6e1830073..189947003b133d2258c2dd46a6443bb19dfa421f 100644 --- a/frame/balances/src/weights.rs +++ b/frame/balances/src/weights.rs @@ -53,7 +53,7 @@ pub trait WeightInfo { /// Weights for pallet_balances using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn transfer() -> Weight { (94_088_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 924ffc8627abc09e08fc8682098c4c4e6cbb527c..acd29e468243e1fa2210ce7efd253305e22f4ab6 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -linregress = "0.1" +linregress = { version = "0.4.0", optional = true } paste = "0.1" codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } sp-api = { version = "2.0.0", path = "../../primitives/api", default-features = false } @@ -38,4 +38,5 @@ std = [ "sp-std/std", "frame-support/std", "frame-system/std", + "linregress", ] diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index dafb4a74b669fef3c90330acae1ae32e27d1712f..dafe42de92e8a24b9a49e615c89128eb9f769c9d 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -18,9 +18,11 @@ //! Tools for analyzing the benchmark results. use std::collections::BTreeMap; -use linregress::{FormulaRegressionBuilder, RegressionDataBuilder, RegressionModel}; +use linregress::{FormulaRegressionBuilder, RegressionDataBuilder}; use crate::BenchmarkResults; +pub use linregress::RegressionModel; + pub struct Analysis { pub base: u128, pub slopes: Vec, diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 284b0545d03a5a9e259639cbb1c3526bc9c048fa..6296c000e289f959eea62fc39305b7a5d95cad65 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -26,7 +26,7 @@ mod analysis; pub use utils::*; #[cfg(feature = "std")] -pub use analysis::{Analysis, BenchmarkSelector}; +pub use analysis::{Analysis, BenchmarkSelector, RegressionModel}; #[doc(hidden)] pub use sp_io::storage::root as storage_root; pub use sp_runtime::traits::Zero; @@ -137,7 +137,7 @@ pub use sp_storage::TrackedStorageKey; /// /// Test functions are automatically generated for each benchmark and are accessible to you when you /// run `cargo test`. All tests are named `test_benchmark_`, expect you to pass them -/// the Runtime Trait, and run them in a test externalities environment. The test function runs your +/// the Runtime Config, and run them in a test externalities environment. The test function runs your /// benchmark just like a regular benchmark, but only testing at the lowest and highest values for /// each component. The function will return `Ok(())` if the benchmarks return no errors. /// @@ -636,7 +636,7 @@ macro_rules! benchmark_backend { #[allow(non_camel_case_types)] struct $name; #[allow(unused_variables)] - impl, I: Instance)? > + impl, I: Instance)? > $crate::BenchmarkingSetup for $name where $( $where_clause )* { @@ -710,7 +710,7 @@ macro_rules! selected_benchmark { } // Allow us to select a benchmark from the list of available benchmarks. - impl, I: Instance )? > + impl, I: Instance )? > $crate::BenchmarkingSetup for SelectedBenchmark where $( $where_clause )* { @@ -750,9 +750,9 @@ macro_rules! impl_benchmark { ( $( { $( $name_inst:ident )? } $name:ident )* ) ( $( $name_extra:ident ),* ) ) => { - impl, I: Instance)? > + impl, I: Instance)? 
> $crate::Benchmarking<$crate::BenchmarkResults> for Module - where T: frame_system::Trait, $( $where_clause )* + where T: frame_system::Config, $( $where_clause )* { fn benchmarks(extra: bool) -> Vec<&'static [u8]> { let mut all = vec![ $( stringify!($name).as_ref() ),* ]; @@ -948,8 +948,8 @@ macro_rules! impl_benchmark_test { $name:ident ) => { $crate::paste::item! { - fn [] () -> Result<(), &'static str> - where T: frame_system::Trait, $( $where_clause )* + fn [] () -> Result<(), &'static str> + where T: frame_system::Config, $( $where_clause )* { let selected_benchmark = SelectedBenchmark::$name; let components = < @@ -1052,10 +1052,29 @@ macro_rules! impl_benchmark_test { /// ``` /// /// At the end of `dispatch_benchmark`, you should return this batches object. +/// +/// In the case where you have multiple instances of a pallet that you need to separately benchmark, +/// the name of your module struct will be used as a suffix to your outputted weight file. For +/// example: +/// +/// ```ignore +/// add_benchmark!(params, batches, pallet_balances, Balances); // pallet_balances.rs +/// add_benchmark!(params, batches, pallet_collective, Council); // pallet_collective_council.rs +/// add_benchmark!(params, batches, pallet_collective, TechnicalCommittee); // pallet_collective_technical_committee.rs +/// ``` +/// +/// You can manipulate this suffixed string by using a type alias if needed. For example: +/// +/// ```ignore +/// type Council2 = TechnicalCommittee; +/// add_benchmark!(params, batches, pallet_collective, Council2); // pallet_collective_council_2.rs +/// ``` + #[macro_export] macro_rules! add_benchmark { ( $params:ident, $batches:ident, $name:ident, $( $location:tt )* ) => ( let name_string = stringify!($name).as_bytes(); + let instance_string = stringify!( $( $location )* ).as_bytes(); let (config, whitelist) = $params; let $crate::BenchmarkConfig { pallet, @@ -1071,6 +1090,9 @@ macro_rules! add_benchmark { if &pallet[..] == &b"*"[..] || &benchmark[..] == &b"*"[..] { for benchmark in $( $location )*::benchmarks(*extra).into_iter() { $batches.push($crate::BenchmarkBatch { + pallet: name_string.to_vec(), + instance: instance_string.to_vec(), + benchmark: benchmark.to_vec(), results: $( $location )*::run_benchmark( benchmark, &lowest_range_values[..], @@ -1080,12 +1102,13 @@ macro_rules! add_benchmark { whitelist, *verify, )?, - pallet: name_string.to_vec(), - benchmark: benchmark.to_vec(), }); } } else { $batches.push($crate::BenchmarkBatch { + pallet: name_string.to_vec(), + instance: instance_string.to_vec(), + benchmark: benchmark.clone(), results: $( $location )*::run_benchmark( &benchmark[..], &lowest_range_values[..], @@ -1095,8 +1118,6 @@ macro_rules! add_benchmark { whitelist, *verify, )?, - pallet: name_string.to_vec(), - benchmark: benchmark.clone(), }); } } diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 0429d98e18618dab5294b34dca56b8a5141a1ce1..f86abebbb9287c19b4f383723280d9913f0a0217 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -29,16 +29,16 @@ use frame_support::{ use frame_system::{RawOrigin, ensure_signed, ensure_none}; decl_storage! { - trait Store for Module as Test where - ::OtherEvent: Into<::Event> + trait Store for Module as Test where + ::OtherEvent: Into<::Event> { Value get(fn value): Option; } } decl_module! 
{ - pub struct Module for enum Call where - origin: T::Origin, ::OtherEvent: Into<::Event> + pub struct Module for enum Call where + origin: T::Origin, ::OtherEvent: Into<::Event> { #[weight = 0] fn set_value(origin, n: u32) -> DispatchResult { @@ -59,12 +59,12 @@ impl_outer_origin! { pub enum Origin for Test where system = frame_system {} } -pub trait OtherTrait { +pub trait OtherConfig { type OtherEvent; } -pub trait Trait: frame_system::Trait + OtherTrait - where Self::OtherEvent: Into<::Event> +pub trait Config: frame_system::Config + OtherConfig + where Self::OtherEvent: Into<::Event> { type Event; } @@ -72,8 +72,11 @@ pub trait Trait: frame_system::Trait + OtherTrait #[derive(Clone, Eq, PartialEq)] pub struct Test; -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -85,13 +88,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = (); type BlockHashCount = (); - type MaximumBlockWeight = (); - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = (); - type MaximumBlockLength = (); - type AvailableBlockRatio = (); type Version = (); type PalletInfo = (); type AccountData = (); @@ -100,11 +96,11 @@ impl frame_system::Trait for Test { type SystemWeightInfo = (); } -impl Trait for Test { +impl Config for Test { type Event = (); } -impl OtherTrait for Test { +impl OtherConfig for Test { type OtherEvent = (); } @@ -113,7 +109,7 @@ fn new_test_ext() -> sp_io::TestExternalities { } benchmarks!{ - where_clause { where ::OtherEvent: Into<::Event> } + where_clause { where ::OtherEvent: Into<::Event> } _ { // Define a common range for `b`. diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 042f4b707aef45f7eda298738c17ffc0379d1da7..2c2aee910e3641b44118e4871efa5e5fd87718e1 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -43,6 +43,8 @@ impl std::fmt::Display for BenchmarkParameter { pub struct BenchmarkBatch { /// The pallet containing this benchmark. pub pallet: Vec, + /// The instance of this pallet being benchmarked. + pub instance: Vec, /// The extrinsic (or benchmark name) of this benchmark. pub benchmark: Vec, /// The results from this benchmark. 
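The hunks above add an `instance` label to `BenchmarkBatch`, derived in `add_benchmark!` by stringifying the module struct passed as `$location`, which is what produces the distinct weight-file suffixes described in the macro docs. A small self-contained sketch of that derivation (the `instance_label!` macro is hypothetical, written only to show how `stringify!` yields different labels for, say, `Council` and `TechnicalCommittee`):

```rust
/// Hypothetical helper mirroring how `add_benchmark!` builds its `instance`
/// bytes: the tokens naming the pallet struct are stringified and stored,
/// so two instances of the same pallet end up with distinct labels.
macro_rules! instance_label {
    ( $( $location:tt )* ) => {
        stringify!( $( $location )* ).as_bytes().to_vec()
    };
}

fn main() {
    assert_eq!(instance_label!(Council), b"Council".to_vec());
    assert_eq!(
        instance_label!(TechnicalCommittee),
        b"TechnicalCommittee".to_vec()
    );
}
```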
diff --git a/frame/bounties/Cargo.toml b/frame/bounties/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..214637bb6c8de7653a8f592c5b10b7eea3ab4359 --- /dev/null +++ b/frame/bounties/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "pallet-bounties" +version = "2.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet to manage bounties" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +serde = { version = "1.0.101", optional = true, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +pallet-treasury = { version = "2.0.0", default-features = false, path = "../treasury" } + +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } + +[dev-dependencies] +sp-io ={ version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-storage = { version = "2.0.0", path = "../../primitives/storage" } +pallet-balances = { version = "2.0.0", path = "../balances" } + +[features] +default = ["std"] +std = [ + "serde", + "codec/std", + "sp-std/std", + "sp-runtime/std", + "frame-support/std", + "frame-system/std", + "pallet-treasury/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", +] diff --git a/frame/bounties/README.md b/frame/bounties/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bf63fca5f34b23ec3a7efa4385556d0ba1672bad --- /dev/null +++ b/frame/bounties/README.md @@ -0,0 +1,52 @@ +# Bounties Module ( pallet-bounties ) + +## Bounty + +**Note :: This pallet is tightly coupled with pallet-treasury** + +A Bounty Spending is a reward for a specified body of work - or specified set of objectives - that +needs to be executed for a predefined Treasury amount to be paid out. A curator is assigned after +the bounty is approved and funded by Council, to be delegated with the responsibility of assigning a +payout address once the specified set of objectives is completed. + +After the Council has activated a bounty, it delegates the work that requires expertise to a curator +in exchange of a deposit. Once the curator accepts the bounty, they get to close the active bounty. +Closing the active bounty enacts a delayed payout to the payout address, the curator fee and the +return of the curator deposit. The delay allows for intervention through regular democracy. The +Council gets to unassign the curator, resulting in a new curator election. The Council also gets to +cancel the bounty if deemed necessary before assigning a curator or once the bounty is active or +payout is pending, resulting in the slash of the curator's deposit. + +### Terminology + +- **Bounty spending proposal:** A proposal to reward a predefined body of work upon completion by + the Treasury. +- **Proposer:** An account proposing a bounty spending. 
+- **Curator:** An account managing the bounty and assigning a payout address receiving the reward + for the completion of work. +- **Deposit:** The amount held on deposit for placing a bounty proposal plus the amount held on + deposit per byte within the bounty description. +- **Curator deposit:** The payment from a candidate willing to curate an approved bounty. The + deposit is returned when/if the bounty is completed. +- **Bounty value:** The total amount that should be paid to the Payout Address if the bounty is + rewarded. +- **Payout address:** The account to which the total or part of the bounty is assigned to. +- **Payout Delay:** The delay period for which a bounty beneficiary needs to wait before claiming. +- **Curator fee:** The reserved upfront payment for a curator for work related to the bounty. + +## Interface + +### Dispatchable Functions + +Bounty protocol: +- `propose_bounty` - Propose a specific treasury amount to be earmarked for a predefined set of + tasks and stake the required deposit. +- `approve_bounty` - Accept a specific treasury amount to be earmarked for a predefined body of + work. +- `propose_curator` - Assign an account to a bounty as candidate curator. +- `accept_curator` - Accept a bounty assignment from the Council, setting a curator deposit. +- `extend_bounty_expiry` - Extend the expiry block number of the bounty and stay active. +- `award_bounty` - Close and pay out the specified amount for the completed work. +- `claim_bounty` - Claim a specific bounty amount from the Payout Address. +- `unassign_curator` - Unassign an accepted curator from a specific earmark. +- `close_bounty` - Cancel the earmark for a specific treasury amount and close the bounty. diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..5a323ff0aafcc8cbd46a797f00e178f065e0f440 --- /dev/null +++ b/frame/bounties/src/benchmarking.rs @@ -0,0 +1,247 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! bounties pallet benchmarking. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; + +use sp_runtime::traits::Bounded; +use frame_system::{EventRecord, RawOrigin}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use frame_support::traits::OnInitialize; + +use crate::Module as Bounties; +use pallet_treasury::Module as Treasury; + +const SEED: u32 = 0; + +// Create bounties that are approved for use in `on_initialize`. +fn create_approved_bounties(n: u32) -> Result<(), &'static str> { + for i in 0 .. 
n { + let (caller, _curator, _fee, value, reason) = setup_bounty::(i, MAX_BYTES); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::get() - 1; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + } + ensure!(BountyApprovals::get().len() == n as usize, "Not all bounty approved"); + Ok(()) +} + +// Create the pre-requisite information needed to create a treasury `propose_bounty`. +fn setup_bounty(u: u32, d: u32) -> ( + T::AccountId, + T::AccountId, + BalanceOf, + BalanceOf, + Vec, +) { + let caller = account("caller", u, SEED); + let value: BalanceOf = T::BountyValueMinimum::get().saturating_mul(100u32.into()); + let fee = value / 2u32.into(); + let deposit = T::BountyDepositBase::get() + T::DataDepositPerByte::get() * MAX_BYTES.into(); + let _ = T::Currency::make_free_balance_be(&caller, deposit); + let curator = account("curator", u, SEED); + let _ = T::Currency::make_free_balance_be(&curator, fee / 2u32.into()); + let reason = vec![0; d as usize]; + (caller, curator, fee, value, reason) +} + +fn create_bounty() -> Result<( + ::Source, + BountyIndex, +), &'static str> { + let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); + let curator_lookup = T::Lookup::unlookup(curator.clone()); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::get() - 1; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + Treasury::::on_initialize(T::BlockNumber::zero()); + Bounties::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup.clone(), fee)?; + Bounties::::accept_curator(RawOrigin::Signed(curator).into(), bounty_id)?; + Ok((curator_lookup, bounty_id)) +} + +fn setup_pod_account() { + let pot_account = Bounties::::account_id(); + let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000u32.into()); + let _ = T::Currency::make_free_balance_be(&pot_account, value); +} + +fn assert_last_event(generic_event: ::Event) { + let events = frame_system::Module::::events(); + let system_event: ::Event = generic_event.into(); + // compare to the last event record + let EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +const MAX_BYTES: u32 = 16384; + +benchmarks! { + _ { } + + propose_bounty { + let d in 0 .. MAX_BYTES; + + let (caller, curator, fee, value, description) = setup_bounty::(0, d); + }: _(RawOrigin::Signed(caller), value, description) + + approve_bounty { + let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::get() - 1; + }: _(RawOrigin::Root, bounty_id) + + propose_curator { + setup_pod_account::(); + let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); + let curator_lookup = T::Lookup::unlookup(curator.clone()); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::get() - 1; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + Bounties::::on_initialize(T::BlockNumber::zero()); + }: _(RawOrigin::Root, bounty_id, curator_lookup, fee) + + // Worst case when curator is inactive and any sender unassigns the curator. 
+ unassign_curator { + setup_pod_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Bounties::::on_initialize(T::BlockNumber::zero()); + let bounty_id = BountyCount::get() - 1; + frame_system::Module::::set_block_number(T::BountyUpdatePeriod::get() + 1u32.into()); + let caller = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), bounty_id) + + accept_curator { + setup_pod_account::(); + let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); + let curator_lookup = T::Lookup::unlookup(curator.clone()); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::get() - 1; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + Bounties::::on_initialize(T::BlockNumber::zero()); + Bounties::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup, fee)?; + }: _(RawOrigin::Signed(curator), bounty_id) + + award_bounty { + setup_pod_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Bounties::::on_initialize(T::BlockNumber::zero()); + + let bounty_id = BountyCount::get() - 1; + let curator = T::Lookup::lookup(curator_lookup)?; + let beneficiary = T::Lookup::unlookup(account("beneficiary", 0, SEED)); + }: _(RawOrigin::Signed(curator), bounty_id, beneficiary) + + claim_bounty { + setup_pod_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Bounties::::on_initialize(T::BlockNumber::zero()); + + let bounty_id = BountyCount::get() - 1; + let curator = T::Lookup::lookup(curator_lookup)?; + + let beneficiary_account: T::AccountId = account("beneficiary", 0, SEED); + let beneficiary = T::Lookup::unlookup(beneficiary_account.clone()); + Bounties::::award_bounty(RawOrigin::Signed(curator.clone()).into(), bounty_id, beneficiary)?; + + frame_system::Module::::set_block_number(T::BountyDepositPayoutDelay::get()); + ensure!(T::Currency::free_balance(&beneficiary_account).is_zero(), "Beneficiary already has balance"); + + }: _(RawOrigin::Signed(curator), bounty_id) + verify { + ensure!(!T::Currency::free_balance(&beneficiary_account).is_zero(), "Beneficiary didn't get paid"); + } + + close_bounty_proposed { + setup_pod_account::(); + let (caller, curator, fee, value, reason) = setup_bounty::(0, 0); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::get() - 1; + }: close_bounty(RawOrigin::Root, bounty_id) + + close_bounty_active { + setup_pod_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Bounties::::on_initialize(T::BlockNumber::zero()); + let bounty_id = BountyCount::get() - 1; + }: close_bounty(RawOrigin::Root, bounty_id) + verify { + assert_last_event::(RawEvent::BountyCanceled(bounty_id).into()) + } + + extend_bounty_expiry { + setup_pod_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Bounties::::on_initialize(T::BlockNumber::zero()); + + let bounty_id = BountyCount::get() - 1; + let curator = T::Lookup::lookup(curator_lookup)?; + }: _(RawOrigin::Signed(curator), bounty_id, Vec::new()) + verify { + assert_last_event::(RawEvent::BountyExtended(bounty_id).into()) + } + + spend_funds { + let b in 1 .. 
100; + setup_pod_account::(); + create_approved_bounties::(b)?; + + let mut budget_remaining = BalanceOf::::max_value(); + let mut imbalance = PositiveImbalanceOf::::zero(); + let mut total_weight = Weight::zero(); + let mut missed_any = false; + }: { + as pallet_treasury::SpendFunds>::spend_funds( + &mut budget_remaining, + &mut imbalance, + &mut total_weight, + &mut missed_any, + ); + } + verify { + ensure!(budget_remaining < BalanceOf::::max_value(), "Budget not used"); + ensure!(missed_any == false, "Missed some"); + assert_last_event::(RawEvent::BountyBecameActive(b - 1).into()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_propose_bounty::()); + assert_ok!(test_benchmark_approve_bounty::()); + assert_ok!(test_benchmark_propose_curator::()); + assert_ok!(test_benchmark_unassign_curator::()); + assert_ok!(test_benchmark_accept_curator::()); + assert_ok!(test_benchmark_award_bounty::()); + assert_ok!(test_benchmark_claim_bounty::()); + assert_ok!(test_benchmark_close_bounty_proposed::()); + assert_ok!(test_benchmark_close_bounty_active::()); + assert_ok!(test_benchmark_extend_bounty_expiry::()); + assert_ok!(test_benchmark_spend_funds::()); + }); + } +} diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..32a377472622ed07933f598db3d52f5bfdade017 --- /dev/null +++ b/frame/bounties/src/lib.rs @@ -0,0 +1,757 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Bounties Module ( pallet-bounties ) +//! +//! ## Bounty +//! +//! > NOTE: This pallet is tightly coupled with pallet-treasury. +//! +//! A Bounty Spending is a reward for a specified body of work - or specified set of objectives - +//! that needs to be executed for a predefined Treasury amount to be paid out. A curator is assigned +//! after the bounty is approved and funded by Council, to be delegated with the responsibility of +//! assigning a payout address once the specified set of objectives is completed. +//! +//! After the Council has activated a bounty, it delegates the work that requires expertise to a +//! curator in exchange of a deposit. Once the curator accepts the bounty, they get to close the +//! active bounty. Closing the active bounty enacts a delayed payout to the payout address, the +//! curator fee and the return of the curator deposit. The delay allows for intervention through +//! regular democracy. The Council gets to unassign the curator, resulting in a new curator +//! election. The Council also gets to cancel the bounty if deemed necessary before assigning a +//! curator or once the bounty is active or payout is pending, resulting in the slash of the +//! curator's deposit. +//! +//! +//! 
### Terminology +//! +//! Bounty: +//! - **Bounty spending proposal:** A proposal to reward a predefined body of work upon completion +//! by the Treasury. +//! - **Proposer:** An account proposing a bounty spending. +//! - **Curator:** An account managing the bounty and assigning a payout address receiving the +//! reward for the completion of work. +//! - **Deposit:** The amount held on deposit for placing a bounty proposal plus the amount held on +//! deposit per byte within the bounty description. +//! - **Curator deposit:** The payment from a candidate willing to curate an approved bounty. The +//! deposit is returned when/if the bounty is completed. +//! - **Bounty value:** The total amount that should be paid to the Payout Address if the bounty is +//! rewarded. +//! - **Payout address:** The account to which the total or part of the bounty is assigned to. +//! - **Payout Delay:** The delay period for which a bounty beneficiary needs to wait before +//! claiming. +//! - **Curator fee:** The reserved upfront payment for a curator for work related to the bounty. +//! +//! ## Interface +//! +//! ### Dispatchable Functions +//! +//! Bounty protocol: +//! - `propose_bounty` - Propose a specific treasury amount to be earmarked for a predefined set of +//! tasks and stake the required deposit. +//! - `approve_bounty` - Accept a specific treasury amount to be earmarked for a predefined body of +//! work. +//! - `propose_curator` - Assign an account to a bounty as candidate curator. +//! - `accept_curator` - Accept a bounty assignment from the Council, setting a curator deposit. +//! - `extend_bounty_expiry` - Extend the expiry block number of the bounty and stay active. +//! - `award_bounty` - Close and pay out the specified amount for the completed work. +//! - `claim_bounty` - Claim a specific bounty amount from the Payout Address. +//! - `unassign_curator` - Unassign an accepted curator from a specific earmark. +//! - `close_bounty` - Cancel the earmark for a specific treasury amount and close the bounty. + +#![cfg_attr(not(feature = "std"), no_std)] + +mod tests; +mod benchmarking; +pub mod weights; + +use sp_std::prelude::*; + +use frame_support::{decl_module, decl_storage, decl_event, ensure, decl_error}; + +use frame_support::traits::{ + Currency, Get, Imbalance, OnUnbalanced, ExistenceRequirement::{AllowDeath}, + ReservableCurrency}; + +use sp_runtime::{Permill, RuntimeDebug, DispatchResult, traits::{ + Zero, StaticLookup, AccountIdConversion, Saturating, BadOrigin +}}; + +use frame_support::dispatch::DispatchResultWithPostInfo; +use frame_support::traits::{EnsureOrigin}; + +use frame_support::weights::{Weight}; + +use codec::{Encode, Decode}; +use frame_system::{self as system, ensure_signed}; +pub use weights::WeightInfo; + +type BalanceOf = pallet_treasury::BalanceOf; + +type PositiveImbalanceOf = pallet_treasury::PositiveImbalanceOf; + +pub trait Config: frame_system::Config + pallet_treasury::Config { + + /// The amount held on deposit for placing a bounty proposal. + type BountyDepositBase: Get>; + + /// The delay period for which a bounty beneficiary need to wait before claim the payout. + type BountyDepositPayoutDelay: Get; + + /// Bounty duration in blocks. + type BountyUpdatePeriod: Get; + + /// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator. + type BountyCuratorDeposit: Get; + + /// Minimum value for a bounty. 
+ type BountyValueMinimum: Get>; + + /// The amount held on deposit per byte within the tip report reason or bounty description. + type DataDepositPerByte: Get>; + + /// The overarching event type. + type Event: From> + Into<::Event>; + + /// Maximum acceptable reason length. + type MaximumReasonLength: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; +} + +/// An index of a bounty. Just a `u32`. +pub type BountyIndex = u32; + +/// A bounty proposal. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct Bounty { + /// The account proposing it. + proposer: AccountId, + /// The (total) amount that should be paid if the bounty is rewarded. + value: Balance, + /// The curator fee. Included in value. + fee: Balance, + /// The deposit of curator. + curator_deposit: Balance, + /// The amount held on deposit (reserved) for making this proposal. + bond: Balance, + /// The status of this bounty. + status: BountyStatus, +} + +/// The status of a bounty proposal. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub enum BountyStatus { + /// The bounty is proposed and waiting for approval. + Proposed, + /// The bounty is approved and waiting to become active at next spend period. + Approved, + /// The bounty is funded and waiting for curator assignment. + Funded, + /// A curator has been proposed by the `ApproveOrigin`. Waiting for acceptance from the curator. + CuratorProposed { + /// The assigned curator of this bounty. + curator: AccountId, + }, + /// The bounty is active and waiting to be awarded. + Active { + /// The curator of this bounty. + curator: AccountId, + /// An update from the curator is due by this block, else they are considered inactive. + update_due: BlockNumber, + }, + /// The bounty is awarded and waiting to released after a delay. + PendingPayout { + /// The curator of this bounty. + curator: AccountId, + /// The beneficiary of the bounty. + beneficiary: AccountId, + /// When the bounty can be claimed. + unlock_at: BlockNumber, + }, +} + +// Note :: For backward compatability reasons, +// pallet-bounties uses Treasury for storage. +// This is temporary solution, soon will get replaced with +// Own storage identifier. +decl_storage! { + trait Store for Module as Treasury { + + /// Number of bounty proposals that have been made. + pub BountyCount get(fn bounty_count): BountyIndex; + + /// Bounties that have been made. + pub Bounties get(fn bounties): + map hasher(twox_64_concat) BountyIndex + => Option, T::BlockNumber>>; + + /// The description of each bounty. + pub BountyDescriptions get(fn bounty_descriptions): map hasher(twox_64_concat) BountyIndex => Option>; + + /// Bounty indices that have been approved but not yet funded. + pub BountyApprovals get(fn bounty_approvals): Vec; + } +} + +decl_event!( + pub enum Event + where + Balance = BalanceOf, + ::AccountId, + { + /// New bounty proposal. \[index\] + BountyProposed(BountyIndex), + /// A bounty proposal was rejected; funds were slashed. \[index, bond\] + BountyRejected(BountyIndex, Balance), + /// A bounty proposal is funded and became active. \[index\] + BountyBecameActive(BountyIndex), + /// A bounty is awarded to a beneficiary. \[index, beneficiary\] + BountyAwarded(BountyIndex, AccountId), + /// A bounty is claimed by beneficiary. \[index, payout, beneficiary\] + BountyClaimed(BountyIndex, Balance, AccountId), + /// A bounty is cancelled. \[index\] + BountyCanceled(BountyIndex), + /// A bounty expiry is extended. 
\[index\] + BountyExtended(BountyIndex), + } +); + +decl_error! { + /// Error for the treasury module. + pub enum Error for Module { + /// Proposer's balance is too low. + InsufficientProposersBalance, + /// No proposal or bounty at that index. + InvalidIndex, + /// The reason given is just too big. + ReasonTooBig, + /// The bounty status is unexpected. + UnexpectedStatus, + /// Require bounty curator. + RequireCurator, + /// Invalid bounty value. + InvalidValue, + /// Invalid bounty fee. + InvalidFee, + /// A bounty payout is pending. + /// To cancel the bounty, you must unassign and slash the curator. + PendingPayout, + /// The bounties cannot be claimed/closed because it's still in the countdown period. + Premature, + } +} + +decl_module! { + pub struct Module + for enum Call + where origin: T::Origin + { + /// The amount held on deposit per byte within bounty description. + const DataDepositPerByte: BalanceOf = T::DataDepositPerByte::get(); + + /// The amount held on deposit for placing a bounty proposal. + const BountyDepositBase: BalanceOf = T::BountyDepositBase::get(); + + /// The delay period for which a bounty beneficiary need to wait before claim the payout. + const BountyDepositPayoutDelay: T::BlockNumber = T::BountyDepositPayoutDelay::get(); + + /// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator. + const BountyCuratorDeposit: Permill = T::BountyCuratorDeposit::get(); + + /// Minimum value for a bounty. + const BountyValueMinimum: BalanceOf = T::BountyValueMinimum::get(); + + /// Maximum acceptable reason length. + const MaximumReasonLength: u32 = T::MaximumReasonLength::get(); + + type Error = Error; + + fn deposit_event() = default; + + /// Propose a new bounty. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as + /// `DataDepositPerByte` for each byte in `reason`. It will be unreserved upon approval, + /// or slashed when rejected. + /// + /// - `curator`: The curator account whom will manage this bounty. + /// - `fee`: The curator fee. + /// - `value`: The total payment amount of this bounty, curator fee included. + /// - `description`: The description of this bounty. + #[weight = ::WeightInfo::propose_bounty(description.len() as u32)] + fn propose_bounty( + origin, + #[compact] value: BalanceOf, + description: Vec, + ) { + let proposer = ensure_signed(origin)?; + Self::create_bounty(proposer, description, value)?; + } + + /// Approve a bounty proposal. At a later time, the bounty will be funded and become active + /// and the original deposit will be returned. + /// + /// May only be called from `T::ApproveOrigin`. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::approve_bounty()] + fn approve_bounty(origin, #[compact] bounty_id: BountyIndex) { + T::ApproveOrigin::ensure_origin(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + ensure!(bounty.status == BountyStatus::Proposed, Error::::UnexpectedStatus); + + bounty.status = BountyStatus::Approved; + + BountyApprovals::append(bounty_id); + + Ok(()) + })?; + } + + /// Assign a curator to a funded bounty. + /// + /// May only be called from `T::ApproveOrigin`. + /// + /// # + /// - O(1). 
+ /// # + #[weight = ::WeightInfo::propose_curator()] + fn propose_curator( + origin, + #[compact] bounty_id: BountyIndex, + curator: ::Source, + #[compact] fee: BalanceOf, + ) { + T::ApproveOrigin::ensure_origin(origin)?; + + let curator = T::Lookup::lookup(curator)?; + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + match bounty.status { + BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => {}, + _ => return Err(Error::::UnexpectedStatus.into()), + }; + + ensure!(fee < bounty.value, Error::::InvalidFee); + + bounty.status = BountyStatus::CuratorProposed { curator }; + bounty.fee = fee; + + Ok(()) + })?; + } + + /// Unassign curator from a bounty. + /// + /// This function can only be called by the `RejectOrigin` a signed origin. + /// + /// If this function is called by the `RejectOrigin`, we assume that the curator is malicious + /// or inactive. As a result, we will slash the curator when possible. + /// + /// If the origin is the curator, we take this as a sign they are unable to do their job and + /// they willingly give up. We could slash them, but for now we allow them to recover their + /// deposit and exit without issue. (We may want to change this if it is abused.) + /// + /// Finally, the origin can be anyone if and only if the curator is "inactive". This allows + /// anyone in the community to call out that a curator is not doing their due diligence, and + /// we should pick a new curator. In this case the curator should also be slashed. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::unassign_curator()] + fn unassign_curator( + origin, + #[compact] bounty_id: BountyIndex, + ) { + let maybe_sender = ensure_signed(origin.clone()) + .map(Some) + .or_else(|_| T::RejectOrigin::ensure_origin(origin).map(|_| None))?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + + let slash_curator = |curator: &T::AccountId, curator_deposit: &mut BalanceOf| { + let imbalance = T::Currency::slash_reserved(curator, *curator_deposit).0; + T::OnSlash::on_unbalanced(imbalance); + *curator_deposit = Zero::zero(); + }; + + match bounty.status { + BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => { + // No curator to unassign at this point. + return Err(Error::::UnexpectedStatus.into()) + } + BountyStatus::CuratorProposed { ref curator } => { + // A curator has been proposed, but not accepted yet. + // Either `RejectOrigin` or the proposed curator can unassign the curator. + ensure!(maybe_sender.map_or(true, |sender| sender == *curator), BadOrigin); + }, + BountyStatus::Active { ref curator, ref update_due } => { + // The bounty is active. + match maybe_sender { + // If the `RejectOrigin` is calling this function, slash the curator. + None => { + slash_curator(curator, &mut bounty.curator_deposit); + // Continue to change bounty status below... + }, + Some(sender) => { + // If the sender is not the curator, and the curator is inactive, + // slash the curator. + if sender != *curator { + let block_number = system::Module::::block_number(); + if *update_due < block_number { + slash_curator(curator, &mut bounty.curator_deposit); + // Continue to change bounty status below... + } else { + // Curator has more time to give an update. 
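The permission and slashing rules for unassigning the curator of an `Active` bounty can be summarised as a small decision table. This is an illustrative sketch only, not the pallet's code; in every `Ok` case the bounty itself falls back to `Funded`.

```rust
enum UnassignCaller {
    RejectOrigin,
    Curator,
    Other,
}

/// Outcome of `unassign_curator` on an `Active` bounty, per the rules described above.
fn unassign_active(caller: UnassignCaller, update_overdue: bool) -> Result<&'static str, &'static str> {
    match caller {
        UnassignCaller::RejectOrigin => Ok("curator deposit slashed"),
        UnassignCaller::Curator => Ok("curator deposit returned"),
        UnassignCaller::Other if update_overdue => Ok("curator deposit slashed (inactive)"),
        UnassignCaller::Other => Err("Premature: the curator still has time to report"),
    }
}
```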
+ return Err(Error::::Premature.into()) + } + } else { + // Else this is the curator, willingly giving up their role. + // Give back their deposit. + let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); + // Continue to change bounty status below... + } + }, + } + }, + BountyStatus::PendingPayout { ref curator, .. } => { + // The bounty is pending payout, so only council can unassign a curator. + // By doing so, they are claiming the curator is acting maliciously, so + // we slash the curator. + ensure!(maybe_sender.is_none(), BadOrigin); + slash_curator(curator, &mut bounty.curator_deposit); + // Continue to change bounty status below... + } + }; + + bounty.status = BountyStatus::Funded; + Ok(()) + })?; + } + + /// Accept the curator role for a bounty. + /// A deposit will be reserved from curator and refund upon successful payout. + /// + /// May only be called from the curator. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::accept_curator()] + fn accept_curator(origin, #[compact] bounty_id: BountyIndex) { + let signer = ensure_signed(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + + match bounty.status { + BountyStatus::CuratorProposed { ref curator } => { + ensure!(signer == *curator, Error::::RequireCurator); + + let deposit = T::BountyCuratorDeposit::get() * bounty.fee; + T::Currency::reserve(curator, deposit)?; + bounty.curator_deposit = deposit; + + let update_due = system::Module::::block_number() + T::BountyUpdatePeriod::get(); + bounty.status = BountyStatus::Active { curator: curator.clone(), update_due }; + + Ok(()) + }, + _ => Err(Error::::UnexpectedStatus.into()), + } + })?; + } + + /// Award bounty to a beneficiary account. The beneficiary will be able to claim the funds after a delay. + /// + /// The dispatch origin for this call must be the curator of this bounty. + /// + /// - `bounty_id`: Bounty ID to award. + /// - `beneficiary`: The beneficiary account whom will receive the payout. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::award_bounty()] + fn award_bounty(origin, #[compact] bounty_id: BountyIndex, beneficiary: ::Source) { + let signer = ensure_signed(origin)?; + let beneficiary = T::Lookup::lookup(beneficiary)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + match &bounty.status { + BountyStatus::Active { + curator, + .. + } => { + ensure!(signer == *curator, Error::::RequireCurator); + }, + _ => return Err(Error::::UnexpectedStatus.into()), + } + bounty.status = BountyStatus::PendingPayout { + curator: signer, + beneficiary: beneficiary.clone(), + unlock_at: system::Module::::block_number() + T::BountyDepositPayoutDelay::get(), + }; + + Ok(()) + })?; + + Self::deposit_event(Event::::BountyAwarded(bounty_id, beneficiary)); + } + + /// Claim the payout from an awarded bounty after payout delay. + /// + /// The dispatch origin for this call must be the beneficiary of this bounty. + /// + /// - `bounty_id`: Bounty ID to claim. + /// + /// # + /// - O(1). 
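The deposit taken in `accept_curator` is a fixed fraction of the curator fee. A hedged sketch using the test mock's 50% `BountyCuratorDeposit`:

```rust
use sp_runtime::Permill;

/// Deposit reserved from the curator on acceptance: a fraction of the agreed fee.
fn curator_deposit(rate: Permill, fee: u64) -> u64 {
    rate * fee
}

#[test]
fn deposit_matches_the_mock_numbers() {
    // With a 50% BountyCuratorDeposit, a fee of 4 reserves a deposit of 2,
    // as checked by `assign_curator_works` in the tests.
    assert_eq!(curator_deposit(Permill::from_percent(50), 4), 2);
}
```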
+ /// # + #[weight = ::WeightInfo::claim_bounty()] + fn claim_bounty(origin, #[compact] bounty_id: BountyIndex) { + let _ = ensure_signed(origin)?; // anyone can trigger claim + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let bounty = maybe_bounty.take().ok_or(Error::::InvalidIndex)?; + if let BountyStatus::PendingPayout { curator, beneficiary, unlock_at } = bounty.status { + ensure!(system::Module::::block_number() >= unlock_at, Error::::Premature); + let bounty_account = Self::bounty_account_id(bounty_id); + let balance = T::Currency::free_balance(&bounty_account); + let fee = bounty.fee.min(balance); // just to be safe + let payout = balance.saturating_sub(fee); + let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); + let _ = T::Currency::transfer(&bounty_account, &curator, fee, AllowDeath); // should not fail + let _ = T::Currency::transfer(&bounty_account, &beneficiary, payout, AllowDeath); // should not fail + *maybe_bounty = None; + + BountyDescriptions::remove(bounty_id); + + Self::deposit_event(Event::::BountyClaimed(bounty_id, payout, beneficiary)); + Ok(()) + } else { + Err(Error::::UnexpectedStatus.into()) + } + })?; + } + + /// Cancel a proposed or active bounty. All the funds will be sent to treasury and + /// the curator deposit will be unreserved if possible. + /// + /// Only `T::RejectOrigin` is able to cancel a bounty. + /// + /// - `bounty_id`: Bounty ID to cancel. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::close_bounty_proposed().max(::WeightInfo::close_bounty_active())] + fn close_bounty(origin, #[compact] bounty_id: BountyIndex) -> DispatchResultWithPostInfo { + T::RejectOrigin::ensure_origin(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResultWithPostInfo { + let bounty = maybe_bounty.as_ref().ok_or(Error::::InvalidIndex)?; + + match &bounty.status { + BountyStatus::Proposed => { + // The reject origin would like to cancel a proposed bounty. + BountyDescriptions::remove(bounty_id); + let value = bounty.bond; + let imbalance = T::Currency::slash_reserved(&bounty.proposer, value).0; + T::OnSlash::on_unbalanced(imbalance); + *maybe_bounty = None; + + Self::deposit_event(Event::::BountyRejected(bounty_id, value)); + // Return early, nothing else to do. + return Ok(Some(::WeightInfo::close_bounty_proposed()).into()) + }, + BountyStatus::Approved => { + // For weight reasons, we don't allow a council to cancel in this phase. + // We ask for them to wait until it is funded before they can cancel. + return Err(Error::::UnexpectedStatus.into()) + }, + BountyStatus::Funded | + BountyStatus::CuratorProposed { .. } => { + // Nothing extra to do besides the removal of the bounty below. + }, + BountyStatus::Active { curator, .. } => { + // Cancelled by council, refund deposit of the working curator. + let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); + // Then execute removal of the bounty below. + }, + BountyStatus::PendingPayout { .. } => { + // Bounty is already pending payout. If council wants to cancel + // this bounty, it should mean the curator was acting maliciously. + // So the council should first unassign the curator, slashing their + // deposit. 
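The payout arithmetic in `claim_bounty` above, isolated for clarity: the curator fee is capped by whatever the bounty account actually holds, and the beneficiary receives the remainder. The numbers in the test come from the tests later in this patch.

```rust
/// Split the bounty account's balance into (curator fee, beneficiary payout).
fn split_payout(account_balance: u64, curator_fee: u64) -> (u64, u64) {
    let fee = curator_fee.min(account_balance);
    let payout = account_balance.saturating_sub(fee);
    (fee, payout)
}

#[test]
fn split_matches_the_claim_tests() {
    // `award_and_claim_bounty_works`: 60 in the bounty account, fee 4 -> (4, 56).
    assert_eq!(split_payout(60, 4), (4, 56));
    // `claim_handles_high_fee`: 40 left after a 10 slash, fee 49 -> (40, 0).
    assert_eq!(split_payout(40, 49), (40, 0));
}
```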
+ return Err(Error::::PendingPayout.into()) + } + } + + let bounty_account = Self::bounty_account_id(bounty_id); + + BountyDescriptions::remove(bounty_id); + + let balance = T::Currency::free_balance(&bounty_account); + let _ = T::Currency::transfer(&bounty_account, &Self::account_id(), balance, AllowDeath); // should not fail + *maybe_bounty = None; + + Self::deposit_event(Event::::BountyCanceled(bounty_id)); + Ok(Some(::WeightInfo::close_bounty_active()).into()) + }) + } + + /// Extend the expiry time of an active bounty. + /// + /// The dispatch origin for this call must be the curator of this bounty. + /// + /// - `bounty_id`: Bounty ID to extend. + /// - `remark`: additional information. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::extend_bounty_expiry()] + fn extend_bounty_expiry(origin, #[compact] bounty_id: BountyIndex, _remark: Vec) { + let signer = ensure_signed(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + + match bounty.status { + BountyStatus::Active { ref curator, ref mut update_due } => { + ensure!(*curator == signer, Error::::RequireCurator); + *update_due = (system::Module::::block_number() + T::BountyUpdatePeriod::get()).max(*update_due); + }, + _ => return Err(Error::::UnexpectedStatus.into()), + } + + Ok(()) + })?; + + Self::deposit_event(Event::::BountyExtended(bounty_id)); + } + } +} + +impl Module { + // Add public immutables and private mutables. + + /// The account ID of the treasury pot. + /// + /// This actually does computation. If you need to keep using it, then make sure you cache the + /// value and only call this once. + pub fn account_id() -> T::AccountId { + T::ModuleId::get().into_account() + } + + /// The account ID of a bounty account + pub fn bounty_account_id(id: BountyIndex) -> T::AccountId { + // only use two byte prefix to support 16 byte account id (used by test) + // "modl" ++ "py/trsry" ++ "bt" is 14 bytes, and two bytes remaining for bounty index + T::ModuleId::get().into_sub_account(("bt", id)) + } + + fn create_bounty( + proposer: T::AccountId, + description: Vec, + value: BalanceOf, + ) -> DispatchResult { + ensure!(description.len() <= T::MaximumReasonLength::get() as usize, Error::::ReasonTooBig); + ensure!(value >= T::BountyValueMinimum::get(), Error::::InvalidValue); + + let index = Self::bounty_count(); + + // reserve deposit for new bounty + let bond = T::BountyDepositBase::get() + + T::DataDepositPerByte::get() * (description.len() as u32).into(); + T::Currency::reserve(&proposer, bond) + .map_err(|_| Error::::InsufficientProposersBalance)?; + + BountyCount::put(index + 1); + + let bounty = Bounty { + proposer, + value, + fee: 0u32.into(), + curator_deposit: 0u32.into(), + bond, + status: BountyStatus::Proposed, + }; + + Bounties::::insert(index, &bounty); + BountyDescriptions::insert(index, description); + + Self::deposit_event(RawEvent::BountyProposed(index)); + + Ok(()) + } + +} + +impl pallet_treasury::SpendFunds for Module { + fn spend_funds( + budget_remaining: &mut BalanceOf, + imbalance: &mut PositiveImbalanceOf, + total_weight: &mut Weight, + missed_any: &mut bool + ) { + let bounties_len = BountyApprovals::mutate(|v| { + let bounties_approval_len = v.len() as u32; + v.retain(|&index| { + Bounties::::mutate(index, |bounty| { + // Should always be true, but shouldn't panic if false or we're screwed. 
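The `retain` loop being entered here can be modelled by a small standalone function; this is a simplified sketch of the approvals-queue handling, not the pallet's code.

```rust
/// Fund approved bounty values in order while the remaining budget allows; anything
/// that does not fit stays in the queue and sets `missed_any`. Returns the original
/// queue length, which feeds the `spend_funds` weight.
fn fund_approved(budget: &mut u64, queue: &mut Vec<u64>, missed_any: &mut bool) -> u32 {
    let len = queue.len() as u32;
    queue.retain(|&value| {
        if value <= *budget {
            *budget -= value;
            false // funded: drop it from the approvals queue
        } else {
            *missed_any = true;
            true // keep it for a later spend period
        }
    });
    len
}
```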
+ if let Some(bounty) = bounty { + if bounty.value <= *budget_remaining { + *budget_remaining -= bounty.value; + + bounty.status = BountyStatus::Funded; + + // return their deposit. + let _ = T::Currency::unreserve(&bounty.proposer, bounty.bond); + + // fund the bounty account + imbalance.subsume(T::Currency::deposit_creating(&Self::bounty_account_id(index), bounty.value)); + + Self::deposit_event(RawEvent::BountyBecameActive(index)); + false + } else { + *missed_any = true; + true + } + } else { + false + } + }) + }); + bounties_approval_len + }); + + *total_weight += ::WeightInfo::spend_funds(bounties_len); + } +} diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..4ebff64b4e4823de5ab5946dcdec42529f5eeb31 --- /dev/null +++ b/frame/bounties/src/tests.rs @@ -0,0 +1,903 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! bounties pallet tests. + +#![cfg(test)] + +use super::*; +use std::cell::RefCell; + +use frame_support::{ + assert_noop, assert_ok, impl_outer_origin, parameter_types, weights::Weight, + impl_outer_event, traits::{OnInitialize} +}; + +use sp_core::H256; +use sp_runtime::{ + Perbill, ModuleId, + testing::Header, + traits::{BlakeTwo256, IdentityLookup, BadOrigin}, +}; + +impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} +} + +mod bounties { + // Re-export needed for `impl_outer_event!`. + pub use crate::*; +} + +impl_outer_event! { + pub enum Event for Test { + system, + pallet_balances, + pallet_treasury, + bounties, + } +} + +#[derive(Clone, Eq, PartialEq)] +pub struct Test; +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} +impl frame_system::Config for Test { + type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); +} +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} +impl pallet_balances::Config for Test { + type MaxLocks = (); + type Balance = u64; + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} +thread_local! 
{ + static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); +} +parameter_types! { + pub const ProposalBond: Permill = Permill::from_percent(5); + pub const ProposalBondMinimum: u64 = 1; + pub const SpendPeriod: u64 = 2; + pub const Burn: Permill = Permill::from_percent(50); + pub const DataDepositPerByte: u64 = 1; + pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); +} +// impl pallet_treasury::Config for Test { +impl pallet_treasury::Config for Test { + type ModuleId = TreasuryModuleId; + type Currency = pallet_balances::Module; + type ApproveOrigin = frame_system::EnsureRoot; + type RejectOrigin = frame_system::EnsureRoot; + type Event = Event; + type OnSlash = (); + type ProposalBond = ProposalBond; + type ProposalBondMinimum = ProposalBondMinimum; + type SpendPeriod = SpendPeriod; + type Burn = Burn; + type BurnDestination = (); // Just gets burned. + type WeightInfo = (); + type SpendFunds = Bounties; +} +parameter_types! { + pub const BountyDepositBase: u64 = 80; + pub const BountyDepositPayoutDelay: u64 = 3; + pub const BountyUpdatePeriod: u32 = 20; + pub const BountyCuratorDeposit: Permill = Permill::from_percent(50); + pub const BountyValueMinimum: u64 = 1; + pub const MaximumReasonLength: u32 = 16384; +} +impl Config for Test { + type Event = Event; + type BountyDepositBase = BountyDepositBase; + type BountyDepositPayoutDelay = BountyDepositPayoutDelay; + type BountyUpdatePeriod = BountyUpdatePeriod; + type BountyCuratorDeposit = BountyCuratorDeposit; + type BountyValueMinimum = BountyValueMinimum; + type DataDepositPerByte = DataDepositPerByte; + type MaximumReasonLength = MaximumReasonLength; + type WeightInfo = (); +} +type System = frame_system::Module; +type Balances = pallet_balances::Module; +type Treasury = pallet_treasury::Module; +type Bounties = Module; + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig::{ + // Total issuance will be 200 with treasury account initialized at ED. + balances: vec![(0, 100), (1, 98), (2, 1)], + }.assimilate_storage(&mut t).unwrap(); + pallet_treasury::GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); + t.into() +} + +fn last_event() -> RawEvent { + System::events().into_iter().map(|r| r.event) + .filter_map(|e| { + if let Event::bounties(inner) = e { Some(inner) } else { None } + }) + .last() + .unwrap() +} + +#[test] +fn genesis_config_works() { + new_test_ext().execute_with(|| { + assert_eq!(Treasury::pot(), 0); + assert_eq!(Treasury::proposal_count(), 0); + }); +} + +#[test] +fn minting_works() { + new_test_ext().execute_with(|| { + // Check that accumulate works when we have Some value in Dummy already. 
+ Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + }); +} + +#[test] +fn spend_proposal_takes_min_deposit() { + new_test_ext().execute_with(|| { + assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); + assert_eq!(Balances::free_balance(0), 99); + assert_eq!(Balances::reserved_balance(0), 1); + }); +} + +#[test] +fn spend_proposal_takes_proportional_deposit() { + new_test_ext().execute_with(|| { + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_eq!(Balances::free_balance(0), 95); + assert_eq!(Balances::reserved_balance(0), 5); + }); +} + +#[test] +fn spend_proposal_fails_when_proposer_poor() { + new_test_ext().execute_with(|| { + assert_noop!( + Treasury::propose_spend(Origin::signed(2), 100, 3), + Error::::InsufficientProposersBalance, + ); + }); +} + +#[test] +fn accepted_spend_proposal_ignored_outside_spend_period() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + + >::on_initialize(1); + assert_eq!(Balances::free_balance(3), 0); + assert_eq!(Treasury::pot(), 100); + }); +} + +#[test] +fn unused_pot_should_diminish() { + new_test_ext().execute_with(|| { + let init_total_issuance = Balances::total_issuance(); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Balances::total_issuance(), init_total_issuance + 100); + + >::on_initialize(2); + assert_eq!(Treasury::pot(), 50); + assert_eq!(Balances::total_issuance(), init_total_issuance + 50); + }); +} + +#[test] +fn rejected_spend_proposal_ignored_on_spend_period() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); + + >::on_initialize(2); + assert_eq!(Balances::free_balance(3), 0); + assert_eq!(Treasury::pot(), 50); + }); +} + +#[test] +fn reject_already_rejected_spend_proposal_fails() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); + assert_noop!(Treasury::reject_proposal(Origin::root(), 0), Error::::InvalidIndex); + }); +} + +#[test] +fn reject_non_existent_spend_proposal_fails() { + new_test_ext().execute_with(|| { + assert_noop!(Treasury::reject_proposal(Origin::root(), 0), Error::::InvalidIndex); + }); +} + +#[test] +fn accept_non_existent_spend_proposal_fails() { + new_test_ext().execute_with(|| { + assert_noop!(Treasury::approve_proposal(Origin::root(), 0), Error::::InvalidIndex); + }); +} + +#[test] +fn accept_already_rejected_spend_proposal_fails() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); + assert_noop!(Treasury::approve_proposal(Origin::root(), 0), Error::::InvalidIndex); + }); +} + +#[test] +fn accepted_spend_proposal_enacted_on_spend_period() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); 
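For reference, the pot shrinkage seen in `unused_pot_should_diminish` comes from the treasury burning a configured fraction of the unspent pot each spend period. A small sketch using the mock's `Burn` value:

```rust
use sp_runtime::Permill;

/// Amount burned from the unspent pot at the end of a spend period.
fn burned(unspent_pot: u64, burn: Permill) -> u64 {
    burn * unspent_pot
}

#[test]
fn mock_burn_halves_the_pot() {
    // The mock sets Burn = 50%, so a pot of 100 drops to 50 after one spend period.
    assert_eq!(burned(100, Permill::from_percent(50)), 50);
}
```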
+
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+		assert_eq!(Balances::free_balance(3), 100);
+		assert_eq!(Treasury::pot(), 0);
+	});
+}
+
+#[test]
+fn pot_underflow_should_not_diminish() {
+	new_test_ext().execute_with(|| {
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		assert_eq!(Treasury::pot(), 100);
+
+		assert_ok!(Treasury::propose_spend(Origin::signed(0), 150, 3));
+		assert_ok!(Treasury::approve_proposal(Origin::root(), 0));
+
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+		assert_eq!(Treasury::pot(), 100); // Pot hasn't changed
+
+		let _ = Balances::deposit_into_existing(&Treasury::account_id(), 100).unwrap();
+		<Treasury as OnInitialize<u64>>::on_initialize(4);
+		assert_eq!(Balances::free_balance(3), 150); // Fund has been spent
+		assert_eq!(Treasury::pot(), 25); // Pot has finally changed
+	});
+}
+
+// Treasury account doesn't get deleted if the amount approved to spend is all of its free balance,
+// i.e. the pot should not include the existential deposit needed for account survival.
+#[test]
+fn treasury_account_doesnt_get_deleted() {
+	new_test_ext().execute_with(|| {
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		assert_eq!(Treasury::pot(), 100);
+		let treasury_balance = Balances::free_balance(&Treasury::account_id());
+
+		assert_ok!(Treasury::propose_spend(Origin::signed(0), treasury_balance, 3));
+		assert_ok!(Treasury::approve_proposal(Origin::root(), 0));
+
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+		assert_eq!(Treasury::pot(), 100); // Pot hasn't changed
+
+		assert_ok!(Treasury::propose_spend(Origin::signed(0), Treasury::pot(), 3));
+		assert_ok!(Treasury::approve_proposal(Origin::root(), 1));
+
+		<Treasury as OnInitialize<u64>>::on_initialize(4);
+		assert_eq!(Treasury::pot(), 0); // Pot is emptied
+		assert_eq!(Balances::free_balance(Treasury::account_id()), 1); // but the account is still there
+	});
+}
+
+// If the treasury account does not exist yet, everything still works fine.
+// This is useful for chains that simply perform a runtime upgrade.
+#[test] +fn inexistent_account_works() { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig::{ + balances: vec![(0, 100), (1, 99), (2, 1)], + }.assimilate_storage(&mut t).unwrap(); + // Treasury genesis config is not build thus treasury account does not exist + let mut t: sp_io::TestExternalities = t.into(); + + t.execute_with(|| { + assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist + assert_eq!(Treasury::pot(), 0); // Pot is empty + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 99, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 1)); + >::on_initialize(2); + assert_eq!(Treasury::pot(), 0); // Pot hasn't changed + assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed + + Balances::make_free_balance_be(&Treasury::account_id(), 100); + assert_eq!(Treasury::pot(), 99); // Pot now contains funds + assert_eq!(Balances::free_balance(Treasury::account_id()), 100); // Account does exist + + >::on_initialize(4); + + assert_eq!(Treasury::pot(), 0); // Pot has changed + assert_eq!(Balances::free_balance(3), 99); // Balance of `3` has changed + }); +} + +#[test] +fn propose_bounty_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 10, b"1234567890".to_vec())); + + assert_eq!(last_event(), RawEvent::BountyProposed(0)); + + let deposit: u64 = 85 + 5; + assert_eq!(Balances::reserved_balance(0), deposit); + assert_eq!(Balances::free_balance(0), 100 - deposit); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 0, + curator_deposit: 0, + value: 10, + bond: deposit, + status: BountyStatus::Proposed, + }); + + assert_eq!(Bounties::bounty_descriptions(0).unwrap(), b"1234567890".to_vec()); + + assert_eq!(Bounties::bounty_count(), 1); + }); +} + +#[test] +fn propose_bounty_validation_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_noop!( + Bounties::propose_bounty(Origin::signed(1), 0, [0; 17_000].to_vec()), + Error::::ReasonTooBig + ); + + assert_noop!( + Bounties::propose_bounty(Origin::signed(1), 10, b"12345678901234567890".to_vec()), + Error::::InsufficientProposersBalance + ); + + assert_noop!( + Bounties::propose_bounty(Origin::signed(1), 0, b"12345678901234567890".to_vec()), + Error::::InvalidValue + ); + }); +} + +#[test] +fn close_bounty_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::::InvalidIndex); + + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 10, b"12345".to_vec())); + + assert_ok!(Bounties::close_bounty(Origin::root(), 0)); + + let deposit: u64 = 80 + 5; + + assert_eq!(last_event(), RawEvent::BountyRejected(0, deposit)); + + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 100 - deposit); + + assert_eq!(Bounties::bounties(0), None); + assert!(!pallet_treasury::Proposals::::contains_key(0)); + + assert_eq!(Bounties::bounty_descriptions(0), None); + }); +} + +#[test] +fn 
approve_bounty_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_noop!(Bounties::approve_bounty(Origin::root(), 0), Error::::InvalidIndex); + + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + let deposit: u64 = 80 + 5; + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 0, + value: 50, + curator_deposit: 0, + bond: deposit, + status: BountyStatus::Approved, + }); + assert_eq!(Bounties::bounty_approvals(), vec![0]); + + assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::::UnexpectedStatus); + + // deposit not returned yet + assert_eq!(Balances::reserved_balance(0), deposit); + assert_eq!(Balances::free_balance(0), 100 - deposit); + + >::on_initialize(2); + + // return deposit + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 100); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 0, + curator_deposit: 0, + value: 50, + bond: deposit, + status: BountyStatus::Funded, + }); + + assert_eq!(Treasury::pot(), 100 - 50 - 25); // burn 25 + assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 50); + }); +} + +#[test] +fn assign_curator_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_noop!(Bounties::propose_curator(Origin::root(), 0, 4, 4), Error::::InvalidIndex); + + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_noop!(Bounties::propose_curator(Origin::root(), 0, 4, 50), Error::::InvalidFee); + + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 4)); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 4, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::CuratorProposed { + curator: 4, + }, + }); + + assert_noop!(Bounties::accept_curator(Origin::signed(1), 0), Error::::RequireCurator); + assert_noop!(Bounties::accept_curator(Origin::signed(4), 0), pallet_balances::Error::::InsufficientBalance); + + Balances::make_free_balance_be(&4, 10); + + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 4, + curator_deposit: 2, + value: 50, + bond: 85, + status: BountyStatus::Active { + curator: 4, + update_due: 22, + }, + }); + + assert_eq!(Balances::free_balance(&4), 8); + assert_eq!(Balances::reserved_balance(&4), 2); + }); +} + +#[test] +fn unassign_curator_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 4)); + + assert_noop!(Bounties::unassign_curator(Origin::signed(1), 0), BadOrigin); + + assert_ok!(Bounties::unassign_curator(Origin::signed(4), 0)); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 4, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + }); + + 
assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 4)); + + Balances::make_free_balance_be(&4, 10); + + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + + assert_ok!(Bounties::unassign_curator(Origin::root(), 0)); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 4, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + }); + + assert_eq!(Balances::free_balance(&4), 8); + assert_eq!(Balances::reserved_balance(&4), 0); // slashed 2 + }); +} + + +#[test] +fn award_and_claim_bounty_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + Balances::make_free_balance_be(&4, 10); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 4)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + + assert_eq!(Balances::free_balance(4), 8); // inital 10 - 2 deposit + + assert_noop!(Bounties::award_bounty(Origin::signed(1), 0, 3), Error::::RequireCurator); + + assert_ok!(Bounties::award_bounty(Origin::signed(4), 0, 3)); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 4, + curator_deposit: 2, + value: 50, + bond: 85, + status: BountyStatus::PendingPayout { + curator: 4, + beneficiary: 3, + unlock_at: 5 + }, + }); + + assert_noop!(Bounties::claim_bounty(Origin::signed(1), 0), Error::::Premature); + + System::set_block_number(5); + >::on_initialize(5); + + assert_ok!(Balances::transfer(Origin::signed(0), Bounties::bounty_account_id(0), 10)); + + assert_ok!(Bounties::claim_bounty(Origin::signed(1), 0)); + + assert_eq!(last_event(), RawEvent::BountyClaimed(0, 56, 3)); + + assert_eq!(Balances::free_balance(4), 14); // initial 10 + fee 4 + + assert_eq!(Balances::free_balance(3), 56); + assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 0); + + assert_eq!(Bounties::bounties(0), None); + assert_eq!(Bounties::bounty_descriptions(0), None); + }); +} + +#[test] +fn claim_handles_high_fee() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + Balances::make_free_balance_be(&4, 30); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 49)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + + assert_ok!(Bounties::award_bounty(Origin::signed(4), 0, 3)); + + System::set_block_number(5); + >::on_initialize(5); + + // make fee > balance + let _ = Balances::slash(&Bounties::bounty_account_id(0), 10); + + assert_ok!(Bounties::claim_bounty(Origin::signed(1), 0)); + + assert_eq!(last_event(), RawEvent::BountyClaimed(0, 0, 3)); + + assert_eq!(Balances::free_balance(4), 70); // 30 + 50 - 10 + assert_eq!(Balances::free_balance(3), 0); + assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 0); + + assert_eq!(Bounties::bounties(0), None); + assert_eq!(Bounties::bounty_descriptions(0), None); + }); +} + +#[test] +fn cancel_and_refund() { + new_test_ext().execute_with(|| { + + System::set_block_number(1); + + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + 
assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_ok!(Balances::transfer(Origin::signed(0), Bounties::bounty_account_id(0), 10)); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 0, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + }); + + assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 60); + + assert_noop!(Bounties::close_bounty(Origin::signed(0), 0), BadOrigin); + + assert_ok!(Bounties::close_bounty(Origin::root(), 0)); + + assert_eq!(Treasury::pot(), 85); // - 25 + 10 + + }); + +} + +#[test] +fn award_and_cancel() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 0, 10)); + assert_ok!(Bounties::accept_curator(Origin::signed(0), 0)); + + assert_eq!(Balances::free_balance(0), 95); + assert_eq!(Balances::reserved_balance(0), 5); + + assert_ok!(Bounties::award_bounty(Origin::signed(0), 0, 3)); + + // Cannot close bounty directly when payout is happening... + assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::::PendingPayout); + + // Instead unassign the curator to slash them and then close. + assert_ok!(Bounties::unassign_curator(Origin::root(), 0)); + assert_ok!(Bounties::close_bounty(Origin::root(), 0)); + + assert_eq!(last_event(), RawEvent::BountyCanceled(0)); + + assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 0); + + // Slashed. 
+ assert_eq!(Balances::free_balance(0), 95); + assert_eq!(Balances::reserved_balance(0), 0); + + assert_eq!(Bounties::bounties(0), None); + assert_eq!(Bounties::bounty_descriptions(0), None); + }); +} + +#[test] +fn expire_and_unassign() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 1, 10)); + assert_ok!(Bounties::accept_curator(Origin::signed(1), 0)); + + assert_eq!(Balances::free_balance(1), 93); + assert_eq!(Balances::reserved_balance(1), 5); + + System::set_block_number(22); + >::on_initialize(22); + + assert_noop!(Bounties::unassign_curator(Origin::signed(0), 0), Error::::Premature); + + System::set_block_number(23); + >::on_initialize(23); + + assert_ok!(Bounties::unassign_curator(Origin::signed(0), 0)); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 10, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + }); + + assert_eq!(Balances::free_balance(1), 93); + assert_eq!(Balances::reserved_balance(1), 0); // slashed + + }); +} + +#[test] +fn extend_expiry() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + Balances::make_free_balance_be(&4, 10); + assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + + assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + + assert_noop!(Bounties::extend_bounty_expiry(Origin::signed(1), 0, Vec::new()), Error::::UnexpectedStatus); + + System::set_block_number(2); + >::on_initialize(2); + + assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 10)); + assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + + assert_eq!(Balances::free_balance(4), 5); + assert_eq!(Balances::reserved_balance(4), 5); + + System::set_block_number(10); + >::on_initialize(10); + + assert_noop!(Bounties::extend_bounty_expiry(Origin::signed(0), 0, Vec::new()), Error::::RequireCurator); + assert_ok!(Bounties::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 10, + curator_deposit: 5, + value: 50, + bond: 85, + status: BountyStatus::Active { curator: 4, update_due: 30 }, + }); + + assert_ok!(Bounties::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); + + assert_eq!(Bounties::bounties(0).unwrap(), Bounty { + proposer: 0, + fee: 10, + curator_deposit: 5, + value: 50, + bond: 85, + status: BountyStatus::Active { curator: 4, update_due: 30 }, // still the same + }); + + System::set_block_number(25); + >::on_initialize(25); + + assert_noop!(Bounties::unassign_curator(Origin::signed(0), 0), Error::::Premature); + assert_ok!(Bounties::unassign_curator(Origin::signed(4), 0)); + + assert_eq!(Balances::free_balance(4), 10); // not slashed + assert_eq!(Balances::reserved_balance(4), 0); + }); +} + +#[test] +fn genesis_funding_works() { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let initial_funding = 100; + pallet_balances::GenesisConfig::{ + // Total issuance will be 200 with treasury account initialized with 100. 
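Several of these assertions lean on the definition of `Treasury::pot()`: the treasury's free balance minus the existential deposit it must retain. A one-line sketch with the mock's `ExistentialDeposit` of 1:

```rust
/// The spendable pot: the treasury's free balance minus its existential deposit.
fn pot(treasury_free_balance: u64, existential_deposit: u64) -> u64 {
    treasury_free_balance.saturating_sub(existential_deposit)
}

#[test]
fn pot_excludes_the_existential_deposit() {
    // A treasury balance of 101 shows up as a pot of 100, and `genesis_funding_works`
    // below expects `initial_funding - minimum_balance()` for the same reason.
    assert_eq!(pot(101, 1), 100);
}
```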
+ balances: vec![(0, 100), (Treasury::account_id(), initial_funding)], + }.assimilate_storage(&mut t).unwrap(); + pallet_treasury::GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); + let mut t: sp_io::TestExternalities = t.into(); + + t.execute_with(|| { + assert_eq!(Balances::free_balance(Treasury::account_id()), initial_funding); + assert_eq!(Treasury::pot(), initial_funding - Balances::minimum_balance()); + }); +} diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..6ba1b9d32b103dc0c4b6d7031ef35fb2413d9509 --- /dev/null +++ b/frame/bounties/src/weights.rs @@ -0,0 +1,189 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_bounties +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 +//! DATE: 2020-12-16, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// ./target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_bounties +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/bounties/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_bounties. +pub trait WeightInfo { + fn propose_bounty(d: u32, ) -> Weight; + fn approve_bounty() -> Weight; + fn propose_curator() -> Weight; + fn unassign_curator() -> Weight; + fn accept_curator() -> Weight; + fn award_bounty() -> Weight; + fn claim_bounty() -> Weight; + fn close_bounty_proposed() -> Weight; + fn close_bounty_active() -> Weight; + fn extend_bounty_expiry() -> Weight; + fn spend_funds(b: u32, ) -> Weight; +} + +/// Weights for pallet_bounties using the Substrate node and recommended hardware. 
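As an illustration of how to read the generated numbers in the impls that follow, the `propose_bounty(d)` weight is a measured base, a per-byte slope, and two database reads plus four writes. A plain-function rendering, with the read/write costs left as parameters since they depend on the configured `DbWeight`:

```rust
/// Shape of the benchmarked `propose_bounty(d)` weight: base + slope * d + db traffic.
fn propose_bounty_weight(d: u64, db_read: u64, db_write: u64) -> u64 {
    64_778_000 + 1_000 * d + 2 * db_read + 4 * db_write
}
```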
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn propose_bounty(d: u32, ) -> Weight { + (64_778_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + fn approve_bounty() -> Weight { + (18_293_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn propose_curator() -> Weight { + (14_248_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn unassign_curator() -> Weight { + (52_100_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn accept_curator() -> Weight { + (52_564_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn award_bounty() -> Weight { + (37_426_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn claim_bounty() -> Weight { + (176_077_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } + fn close_bounty_proposed() -> Weight { + (51_162_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn close_bounty_active() -> Weight { + (116_907_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + fn extend_bounty_expiry() -> Weight { + (36_419_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn spend_funds(b: u32, ) -> Weight { + (7_562_000 as Weight) + // Standard Error: 16_000 + .saturating_add((77_328_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn propose_bounty(d: u32, ) -> Weight { + (64_778_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + fn approve_bounty() -> Weight { + (18_293_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn propose_curator() -> Weight { + (14_248_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn unassign_curator() -> Weight { + (52_100_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn accept_curator() -> Weight { + (52_564_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn award_bounty() -> Weight { + 
(37_426_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn claim_bounty() -> Weight { + (176_077_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + fn close_bounty_proposed() -> Weight { + (51_162_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn close_bounty_active() -> Weight { + (116_907_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + fn extend_bounty_expiry() -> Weight { + (36_419_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn spend_funds(b: u32, ) -> Weight { + (7_562_000 as Weight) + // Standard Error: 16_000 + .saturating_add((77_328_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) + } +} diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index d4e80d515941fea3b02d588177f3f0108c71195e..551d6c7856cda3941b850868a28d5bf2cf37f87d 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -33,9 +33,9 @@ const SEED: u32 = 0; const MAX_BYTES: u32 = 1_024; -fn assert_last_event, I: Instance>(generic_event: >::Event) { +fn assert_last_event, I: Instance>(generic_event: >::Event) { let events = System::::events(); - let system_event: ::Event = generic_event.into(); + let system_event: ::Event = generic_event.into(); // compare to the last event record let EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index b7d561672b82f1b30e650fa2e42b6d07ba130a31..efc8626d6892a6675af2e70a183e4cb2125fa82e 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -56,7 +56,7 @@ use frame_support::{ }, ensure, traits::{ChangeMembers, EnsureOrigin, Get, InitializeMembers}, - weights::{DispatchClass, GetDispatchInfo, Weight}, + weights::{DispatchClass, GetDispatchInfo, Weight, Pays}, }; use frame_system::{self as system, ensure_signed, ensure_root}; @@ -121,18 +121,18 @@ impl DefaultVote for MoreThanMajorityThenPrimeDefaultVote { } } -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The outer origin type. type Origin: From>; /// The outer call dispatch type. type Proposal: Parameter - + Dispatchable>::Origin, PostInfo=PostDispatchInfo> + + Dispatchable>::Origin, PostInfo=PostDispatchInfo> + From> + GetDispatchInfo; /// The outer event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The time-out for council motions. type MotionDuration: Get; @@ -166,7 +166,7 @@ pub enum RawOrigin { } /// Origin for the collective module. -pub type Origin = RawOrigin<::AccountId, I>; +pub type Origin = RawOrigin<::AccountId, I>; #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] /// Info for keeping track of a motion being voted on. @@ -184,12 +184,12 @@ pub struct Votes { } decl_storage! 
{ - trait Store for Module, I: Instance=DefaultInstance> as Collective { + trait Store for Module, I: Instance=DefaultInstance> as Collective { /// The hashes of the active proposals. pub Proposals get(fn proposals): Vec; /// Actual proposal for a given hash, if it's current. pub ProposalOf get(fn proposal_of): - map hasher(identity) T::Hash => Option<>::Proposal>; + map hasher(identity) T::Hash => Option<>::Proposal>; /// Votes on a given proposal, if it is ongoing. pub Voting get(fn voting): map hasher(identity) T::Hash => Option>; @@ -209,8 +209,8 @@ decl_storage! { decl_event! { pub enum Event where - ::Hash, - ::AccountId, + ::Hash, + ::AccountId, { /// A motion (given hash) has been proposed (by given account) with a threshold (given /// `MemberCount`). @@ -239,7 +239,7 @@ decl_event! { } decl_error! { - pub enum Error for Module, I: Instance> { + pub enum Error for Module, I: Instance> { /// Account is not a member NotMember, /// Duplicate proposals not allowed @@ -276,7 +276,7 @@ fn get_result_weight(result: DispatchResultWithPostInfo) -> Option { // Note that councillor operations are assigned to the operational class. decl_module! { - pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: ::Origin { + pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: ::Origin { type Error = Error; fn deposit_event() = default; @@ -365,7 +365,7 @@ decl_module! { DispatchClass::Operational )] fn execute(origin, - proposal: Box<>::Proposal>, + proposal: Box<>::Proposal>, #[compact] length_bound: u32, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; @@ -432,7 +432,7 @@ decl_module! { )] fn propose(origin, #[compact] threshold: MemberCount, - proposal: Box<>::Proposal>, + proposal: Box<>::Proposal>, #[compact] length_bound: u32 ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; @@ -488,6 +488,8 @@ decl_module! { /// /// Requires the sender to be a member. /// + /// Transaction fees will be waived if the member is voting on any particular proposal + /// for the first time and the call is successful. Subsequent vote changes will charge a fee. /// # /// ## Weight /// - `O(M)` where `M` is members-count (code- and governance-bounded) @@ -515,6 +517,9 @@ decl_module! { let position_yes = voting.ayes.iter().position(|a| a == &who); let position_no = voting.nays.iter().position(|a| a == &who); + // Detects first vote of the member in the motion + let is_account_voting_first_time = position_yes.is_none() && position_no.is_none(); + if approve { if position_yes.is_none() { voting.ayes.push(who.clone()); @@ -541,7 +546,17 @@ decl_module! { Voting::::insert(&proposal, voting); - Ok(Some(T::WeightInfo::vote(members.len() as u32)).into()) + if is_account_voting_first_time { + Ok(( + Some(T::WeightInfo::vote(members.len() as u32)), + Pays::No, + ).into()) + } else { + Ok(( + Some(T::WeightInfo::vote(members.len() as u32)), + Pays::Yes, + ).into()) + } } /// Close a vote that is either approved, disapproved or whose voting period has ended. @@ -554,6 +569,9 @@ decl_module! { /// If called after the end of the voting period abstentions are counted as rejections /// unless there is a prime member set and the prime member cast an approval. /// + /// If the close operation completes successfully with disapproval, the transaction fee will + /// be waived. Otherwise execution of the approved operation will be charged to the caller. + /// /// + `proposal_weight_bound`: The maximum amount of weight consumed by executing the closed proposal. 
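The fee waiving introduced for `vote` relies on returning a `(Option<Weight>, Pays)` pair as the post-dispatch info. A minimal sketch of that pattern, assuming the `frame_support` API as used in this patch:

```rust
use frame_support::{dispatch::DispatchResultWithPostInfo, weights::{Pays, Weight}};

/// Report the actual weight and waive the fee only on the member's first vote for a
/// given motion (`first_time` stands in for the pallet's `is_account_voting_first_time`).
fn vote_post_info(first_time: bool, actual_weight: Weight) -> DispatchResultWithPostInfo {
    if first_time {
        Ok((Some(actual_weight), Pays::No).into())
    } else {
        Ok((Some(actual_weight), Pays::Yes).into())
    }
}
```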
/// + `length_bound`: The upper bound for the length of the proposal in storage. Checked via /// `storage::read` so it is `size_of::() == 4` larger than the pure length. @@ -606,20 +624,23 @@ decl_module! { let (proposal, len) = Self::validate_and_get_proposal( &proposal_hash, length_bound, - proposal_weight_bound + proposal_weight_bound, )?; Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); let (proposal_weight, proposal_count) = Self::do_approve_proposal(seats, voting, proposal_hash, proposal); - return Ok(Some( - T::WeightInfo::close_early_approved(len as u32, seats, proposal_count) - .saturating_add(proposal_weight) + return Ok(( + Some(T::WeightInfo::close_early_approved(len as u32, seats, proposal_count) + .saturating_add(proposal_weight)), + Pays::Yes, ).into()); + } else if disapproved { Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); let proposal_count = Self::do_disapprove_proposal(proposal_hash); - return Ok(Some( - T::WeightInfo::close_early_disapproved(seats, proposal_count) + return Ok(( + Some(T::WeightInfo::close_early_disapproved(seats, proposal_count)), + Pays::No, ).into()); } @@ -642,20 +663,22 @@ decl_module! { let (proposal, len) = Self::validate_and_get_proposal( &proposal_hash, length_bound, - proposal_weight_bound + proposal_weight_bound, )?; Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); let (proposal_weight, proposal_count) = Self::do_approve_proposal(seats, voting, proposal_hash, proposal); - return Ok(Some( - T::WeightInfo::close_approved(len as u32, seats, proposal_count) - .saturating_add(proposal_weight) + return Ok(( + Some(T::WeightInfo::close_approved(len as u32, seats, proposal_count) + .saturating_add(proposal_weight)), + Pays::Yes, ).into()); } else { Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); let proposal_count = Self::do_disapprove_proposal(proposal_hash); - return Ok(Some( - T::WeightInfo::close_disapproved(seats, proposal_count) + return Ok(( + Some(T::WeightInfo::close_disapproved(seats, proposal_count)), + Pays::No, ).into()); } } @@ -682,7 +705,7 @@ decl_module! { } } -impl, I: Instance> Module { +impl, I: Instance> Module { /// Check whether `who` is a member of the collective. pub fn is_member(who: &T::AccountId) -> bool { // Note: The dispatchables *do not* use this to check membership so make sure @@ -698,7 +721,7 @@ impl, I: Instance> Module { hash: &T::Hash, length_bound: u32, weight_bound: Weight - ) -> Result<(>::Proposal, usize), DispatchError> { + ) -> Result<(>::Proposal, usize), DispatchError> { let key = ProposalOf::::hashed_key_for(hash); // read the length of the proposal storage entry directly let proposal_len = storage::read(&key, &mut [0; 0], 0) @@ -728,7 +751,7 @@ impl, I: Instance> Module { seats: MemberCount, voting: Votes, proposal_hash: T::Hash, - proposal: >::Proposal, + proposal: >::Proposal, ) -> (Weight, u32) { Self::deposit_event(RawEvent::Approved(proposal_hash)); @@ -764,7 +787,7 @@ impl, I: Instance> Module { } } -impl, I: Instance> ChangeMembers for Module { +impl, I: Instance> ChangeMembers for Module { /// Update the members of the collective. Votes are updated and the prime is reset. 
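For `close`, the rule added above is: a close that executes an approved proposal is charged to the caller, while a close that ends in disapproval is free. Sketched as a tiny helper:

```rust
use frame_support::weights::Pays;

/// Whether the caller of `close` pays a transaction fee, per the rule described above.
fn close_pays(approved: bool) -> Pays {
    if approved { Pays::Yes } else { Pays::No }
}
```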
/// /// NOTE: Does not enforce the expected `MaxMembers` limit on the amount of members, but @@ -819,7 +842,7 @@ impl, I: Instance> ChangeMembers for Module { } } -impl, I: Instance> InitializeMembers for Module { +impl, I: Instance> InitializeMembers for Module { fn initialize_members(members: &[T::AccountId]) { if !members.is_empty() { assert!(>::get().is_empty(), "Members are already initialized!"); @@ -933,27 +956,29 @@ impl< #[cfg(test)] mod tests { use super::*; - use frame_support::{Hashable, assert_ok, assert_noop, parameter_types, weights::Weight}; + use frame_support::{Hashable, assert_ok, assert_noop, parameter_types}; use frame_system::{self as system, EventRecord, Phase}; use hex_literal::hex; use sp_core::H256; use sp_runtime::{ - Perbill, traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, testing::Header, + traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, testing::Header, BuildStorage, }; use crate as collective; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); pub const MotionDuration: u64 = 3; pub const MaxProposals: u32 = 100; pub const MaxMembers: u32 = 100; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -965,13 +990,6 @@ mod tests { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = (); @@ -979,7 +997,7 @@ mod tests { type OnKilledAccount = (); type SystemWeightInfo = (); } - impl Trait for Test { + impl Config for Test { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -989,7 +1007,7 @@ mod tests { type DefaultVote = PrimeDefaultVote; type WeightInfo = (); } - impl Trait for Test { + impl Config for Test { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -999,7 +1017,7 @@ mod tests { type DefaultVote = MoreThanMajorityThenPrimeDefaultVote; type WeightInfo = (); } - impl Trait for Test { + impl Config for Test { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -1441,6 +1459,96 @@ mod tests { }); } + #[test] + fn motions_all_first_vote_free_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let hash: H256 = proposal.blake2_256().into(); + let end = 4; + assert_ok!( + Collective::propose( + Origin::signed(1), + 2, + Box::new(proposal.clone()), + proposal_len, + ) + ); + assert_eq!( + Collective::voting(&hash), + Some(Votes { index: 0, threshold: 2, ayes: vec![1], nays: vec![], end }) + ); + + // For the motion, acc 2's first vote, expecting Ok with Pays::No. 
+            let vote_rval: DispatchResultWithPostInfo = Collective::vote(
+                Origin::signed(2),
+                hash.clone(),
+                0,
+                true,
+            );
+            assert_eq!(vote_rval.unwrap().pays_fee, Pays::No);
+
+            // Duplicate vote, expecting error with Pays::Yes.
+            let vote_rval: DispatchResultWithPostInfo = Collective::vote(
+                Origin::signed(2),
+                hash.clone(),
+                0,
+                true,
+            );
+            assert_eq!(vote_rval.unwrap_err().post_info.pays_fee, Pays::Yes);
+
+            // Modifying the vote, expecting Ok with Pays::Yes.
+            let vote_rval: DispatchResultWithPostInfo = Collective::vote(
+                Origin::signed(2),
+                hash.clone(),
+                0,
+                false,
+            );
+            assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes);
+
+            // For the motion, acc 3's first vote, expecting Ok with Pays::No.
+            let vote_rval: DispatchResultWithPostInfo = Collective::vote(
+                Origin::signed(3),
+                hash.clone(),
+                0,
+                true,
+            );
+            assert_eq!(vote_rval.unwrap().pays_fee, Pays::No);
+
+            // acc 3 modifies the vote, expecting Ok with Pays::Yes.
+            let vote_rval: DispatchResultWithPostInfo = Collective::vote(
+                Origin::signed(3),
+                hash.clone(),
+                0,
+                false,
+            );
+            assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes);
+
+            // Test the close() extrinsic: check the DispatchResultWithPostInfo pays information.
+
+            let proposal_weight = proposal.get_dispatch_info().weight;
+            let close_rval: DispatchResultWithPostInfo = Collective::close(
+                Origin::signed(2),
+                hash.clone(),
+                0,
+                proposal_weight,
+                proposal_len,
+            );
+            assert_eq!(close_rval.unwrap().pays_fee, Pays::No);
+
+            // Trying to close the proposal, which is already closed.
+            // Expecting error "ProposalMissing" with Pays::Yes.
+            let close_rval: DispatchResultWithPostInfo = Collective::close(
+                Origin::signed(2),
+                hash.clone(),
+                0,
+                proposal_weight,
+                proposal_len,
+            );
+            assert_eq!(close_rval.unwrap_err().post_info.pays_fee, Pays::Yes);
+        });
+    }
+
     #[test]
     fn motions_reproposing_disapproved_works() {
         new_test_ext().execute_with(|| {
diff --git a/frame/collective/src/weights.rs b/frame/collective/src/weights.rs
index 4e4ec5196d0a7ba741d1c3efc499a95afaac6ff8..8a76ff516ca352932b107a5a60f0232e74b955e1 100644
--- a/frame/collective/src/weights.rs
+++ b/frame/collective/src/weights.rs
@@ -58,7 +58,7 @@ pub trait WeightInfo {
 /// Weights for pallet_collective using the Substrate node and recommended hardware.
 pub struct SubstrateWeight<T>(PhantomData<T>);
-impl<T: frame_system::Trait> WeightInfo for SubstrateWeight<T> {
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
     fn set_members(m: u32, n: u32, p: u32, ) -> Weight {
         (0 as Weight)
             .saturating_add((20_933_000 as Weight).saturating_mul(m as Weight))
diff --git a/frame/contracts/README.md b/frame/contracts/README.md
index dddcc3c8b8b85dc6e98fc8b4d19c50e86ed2e193..4252bfc1d843402ef8541b28402665b0b679d95a 100644
--- a/frame/contracts/README.md
+++ b/frame/contracts/README.md
@@ -61,4 +61,4 @@ WebAssembly based smart contracts in the Rust programming language. This is a wo
 * [Balances](https://docs.rs/pallet-balances/latest/pallet_balances/)

-License: Apache-2.0
\ No newline at end of file
+License: Apache-2.0
diff --git a/frame/contracts/fixtures/call_return_code.wat b/frame/contracts/fixtures/call_return_code.wat
index f7a7ff20a49e396f90efb63882a468383aeab07c..4e9ab4dd77ce153704b60e40c065dfd1fe13a0b1 100644
--- a/frame/contracts/fixtures/call_return_code.wat
+++ b/frame/contracts/fixtures/call_return_code.wat
@@ -1,5 +1,5 @@
-;; This calls Django (4) and transfers 100 balance during this call and copies the return code
-;; of this call to the output buffer.
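One detail worth spelling out about the new test above: on success the fee information is read directly from the `Ok(PostDispatchInfo)` value, while on failure it travels inside `DispatchErrorWithPostInfo`, which is why the error-path assertions go through `unwrap_err().post_info`. A small illustrative helper (not part of this diff) showing the two paths:

use frame_support::{
    dispatch::DispatchResultWithPostInfo,
    weights::{Pays, PostDispatchInfo},
};
use sp_runtime::DispatchErrorWithPostInfo;

// Illustrative only: pull `pays_fee` out of either arm of a dispatch result.
fn pays_fee_of(result: DispatchResultWithPostInfo) -> Pays {
    match result {
        Ok(PostDispatchInfo { pays_fee, .. }) => pays_fee,
        Err(DispatchErrorWithPostInfo { post_info, .. }) => post_info.pays_fee,
    }
}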
+;; This calls the supplied dest and transfers 100 balance during this call and copies +;; the return code of this call to the output buffer. ;; It also forwards its input to the callee. (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) @@ -7,38 +7,36 @@ (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) address of django - (data (i32.const 0) "\04\00\00\00\00\00\00\00") + ;; [0, 8) 100 balance + (data (i32.const 0) "\64\00\00\00\00\00\00\00") - ;; [8, 16) 100 balance - (data (i32.const 8) "\64\00\00\00\00\00\00\00") + ;; [8, 12) here we store the return code of the transfer - ;; [16, 20) here we store the return code of the transfer + ;; [12, 16) size of the input data + (data (i32.const 12) "\24") - ;; [20, 24) here we store the input data - - ;; [24, 28) size of the input data - (data (i32.const 24) "\04") + ;; [16, inf) here we store the input data + ;; 32 byte dest + 4 byte forward (func (export "deploy")) (func (export "call") - (call $seal_input (i32.const 20) (i32.const 24)) + (call $seal_input (i32.const 16) (i32.const 12)) (i32.store - (i32.const 16) + (i32.const 8) (call $seal_call - (i32.const 0) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 16) ;; Pointer to "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 8) ;; Pointer to the buffer with value to transfer + (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 20) ;; Pointer to input data buffer address - (i32.load (i32.const 24)) ;; Length of input data buffer + (i32.const 48) ;; Pointer to input data buffer address + (i32.const 4) ;; Length of input data buffer (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Ptr to output buffer len ) ) ;; exit with success and take transfer return code to the output buffer - (call $seal_return (i32.const 0) (i32.const 16) (i32.const 4)) + (call $seal_return (i32.const 0) (i32.const 8) (i32.const 4)) ) ) diff --git a/frame/contracts/fixtures/caller_contract.wat b/frame/contracts/fixtures/caller_contract.wat index 408af92e18296436326f2eb6a1bcad747b914776..d6564117b721f521203f73d06a9cf26404318065 100644 --- a/frame/contracts/fixtures/caller_contract.wat +++ b/frame/contracts/fixtures/caller_contract.wat @@ -2,7 +2,9 @@ (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_balance" (func $seal_balance (param i32 i32))) (import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) - (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_instantiate" (func $seal_instantiate + (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) + )) (import "seal0" "seal_println" (func $seal_println (param i32 i32))) (import "env" "memory" (memory 1 1)) @@ -71,6 +73,8 @@ (i32.const 0) ;; Length is ignored in this case (i32.const 4294967295) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_le ) ) @@ -98,6 +102,9 @@ (i32.const 0) ;; Length is ignored in this case (i32.const 4294967295) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case + 
(i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_le + ) ) @@ -114,7 +121,7 @@ ;; Length of the output buffer (i32.store (i32.sub (get_local $sp) (i32.const 4)) - (i32.const 8) + (i32.const 256) ) ;; Deploy the contract successfully. @@ -131,6 +138,8 @@ (i32.sub (get_local $sp) (i32.const 4)) ;; Pointer to the address buffer length (i32.const 4294967295) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_le ) ) @@ -142,7 +151,7 @@ ;; Check that address has the expected length (call $assert - (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 4))) (i32.const 8)) + (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 4))) (i32.const 32)) ) ;; Check that balance has been deducted. @@ -169,7 +178,7 @@ (set_local $exit_code (call $seal_call (i32.const 16) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. @@ -205,7 +214,7 @@ (set_local $exit_code (call $seal_call (i32.const 16) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 1) ;; Supply too little gas (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. @@ -242,7 +251,7 @@ (set_local $exit_code (call $seal_call (i32.const 16) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. diff --git a/frame/contracts/fixtures/destroy_and_transfer.wat b/frame/contracts/fixtures/destroy_and_transfer.wat index 3220f4e612d7de40e3ee64f6cbce09757834dbea..7e1d84f3cf98a9d5ae412d765d6fac2708fe3595 100644 --- a/frame/contracts/fixtures/destroy_and_transfer.wat +++ b/frame/contracts/fixtures/destroy_and_transfer.wat @@ -4,7 +4,9 @@ (import "seal0" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32))) (import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) (import "seal0" "seal_transfer" (func $seal_transfer (param i32 i32 i32 i32) (result i32))) - (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_instantiate" (func $seal_instantiate + (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) + )) (import "env" "memory" (memory 1 1)) ;; [0, 8) Endowment to send when creating contract. @@ -16,14 +18,18 @@ ;; [48, 80) Buffer where to store the input to the contract - ;; [80, 88) Buffer where to store the address of the instantiated contract - ;; [88, 96) Size of the buffer - (data (i32.const 88) "\08") + (data (i32.const 88) "\FF") ;; [96, 100) Size of the input buffer (data (i32.const 96) "\20") + ;; [100, 132) Buffer where to store the address of the instantiated contract + + ;; [132, 134) Salt + (data (i32.const 132) "\47\11") + + (func $assert (param i32) (block $ok (br_if $ok @@ -54,10 +60,12 @@ (i32.const 8) ;; Length of the buffer with value to transfer. 
(i32.const 0) ;; Pointer to input data buffer address (i32.const 0) ;; Length of input data buffer - (i32.const 80) ;; Buffer where to store address of new contract + (i32.const 100) ;; Buffer where to store address of new contract (i32.const 88) ;; Pointer to the length of the buffer (i32.const 4294967295) ;; u32 max sentinel value: do not copy output - (i32.const 0) ;; Length is ignored in this cas + (i32.const 0) ;; Length is ignored in this case + (i32.const 132) ;; salt_ptr + (i32.const 2) ;; salt_len ) (i32.const 0) ) @@ -67,15 +75,15 @@ (call $assert (i32.eq (i32.load (i32.const 88)) - (i32.const 8) + (i32.const 32) ) ) ;; Store the return address. (call $seal_set_storage (i32.const 16) ;; Pointer to the key - (i32.const 80) ;; Pointer to the value - (i32.const 8) ;; Length of the value + (i32.const 100) ;; Pointer to the value + (i32.const 32) ;; Length of the value ) ) @@ -85,7 +93,7 @@ (i32.eq (call $seal_get_storage (i32.const 16) ;; Pointer to the key - (i32.const 80) ;; Pointer to the value + (i32.const 100) ;; Pointer to the value (i32.const 88) ;; Pointer to the len of the value ) (i32.const 0) @@ -94,7 +102,7 @@ (call $assert (i32.eq (i32.load (i32.const 88)) - (i32.const 8) + (i32.const 32) ) ) @@ -102,8 +110,8 @@ (call $assert (i32.eq (call $seal_call - (i32.const 80) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address + (i32.const 100) ;; Pointer to destination address + (i32.const 32) ;; Length of destination address (i64.const 0) ;; How much gas to devote for the execution. 0 = all. (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer @@ -121,8 +129,8 @@ (call $assert (i32.eq (call $seal_call - (i32.const 80) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address + (i32.const 100) ;; Pointer to destination address + (i32.const 32) ;; Length of destination address (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
(i32.const 8) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer @@ -141,8 +149,8 @@ (call $assert (i32.eq (call $seal_transfer - (i32.const 80) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address + (i32.const 100) ;; Pointer to destination address + (i32.const 32) ;; Length of destination address (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer ) diff --git a/frame/contracts/fixtures/drain.wat b/frame/contracts/fixtures/drain.wat index 9180047f5d015b47e75678ce12885125a99bdbe0..546026ac95986519ae016ec6e5b06c745c3afe7d 100644 --- a/frame/contracts/fixtures/drain.wat +++ b/frame/contracts/fixtures/drain.wat @@ -38,7 +38,7 @@ (i32.eq (call $seal_transfer (i32.const 16) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address + (i32.const 32) ;; Length of destination address (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer ) diff --git a/frame/contracts/fixtures/instantiate_return_code.wat b/frame/contracts/fixtures/instantiate_return_code.wat index 20ab96d88ad2e2c6fc66139597cb561793a89268..cead1f1c9fa4054cdca37b586a695a7c214895d0 100644 --- a/frame/contracts/fixtures/instantiate_return_code.wat +++ b/frame/contracts/fixtures/instantiate_return_code.wat @@ -1,47 +1,49 @@ -;; This instantiats Charlie (3) and transfers 100 balance during this call and copies the return code +;; This instantiats a contract and transfers 100 balance during this call and copies the return code ;; of this call to the output buffer. ;; The first 32 byte of input is the code hash to instantiate ;; The rest of the input is forwarded to the constructor of the callee (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_instantiate" (func $seal_instantiate + (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) + )) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) address of django - (data (i32.const 0) "\04\00\00\00\00\00\00\00") + ;; [0, 8) 100 balance + (data (i32.const 0) "\64\00\00\00\00\00\00\00") - ;; [8, 16) 100 balance - (data (i32.const 8) "\64\00\00\00\00\00\00\00") + ;; [8, 12) here we store the return code of the transfer - ;; [16, 20) here we store the return code of the transfer + ;; [12, 16) size of the input buffer + (data (i32.const 12) "\24") - ;; [20, 24) size of the input buffer - (data (i32.const 20) "\FF") - - ;; [24, inf) input buffer + ;; [16, inf) input buffer + ;; 32 bye code hash + 4 byte forward (func (export "deploy")) (func (export "call") - (call $seal_input (i32.const 24) (i32.const 20)) + (call $seal_input (i32.const 16) (i32.const 12)) (i32.store - (i32.const 16) + (i32.const 8) (call $seal_instantiate - (i32.const 24) ;; Pointer to the code hash. + (i32.const 16) ;; Pointer to the code hash. (i32.const 32) ;; Length of the code hash. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 8) ;; Pointer to the buffer with value to transfer + (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. 
- (i32.const 56) ;; Pointer to input data buffer address - (i32.sub (i32.load (i32.const 20)) (i32.const 32)) ;; Length of input data buffer + (i32.const 48) ;; Pointer to input data buffer address + (i32.const 4) ;; Length of input data buffer (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy address (i32.const 0) ;; Length is ignored in this case (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_len ) ) ;; exit with success and take transfer return code to the output buffer - (call $seal_return (i32.const 0) (i32.const 16) (i32.const 4)) + (call $seal_return (i32.const 0) (i32.const 8) (i32.const 4)) ) ) diff --git a/frame/contracts/fixtures/restoration.wat b/frame/contracts/fixtures/restoration.wat index 3c15f7ae0881e1e3c225872bb51c9352b3194d6d..3462af2870816dddb328e45b4a8fc2940283808b 100644 --- a/frame/contracts/fixtures/restoration.wat +++ b/frame/contracts/fixtures/restoration.wat @@ -19,20 +19,19 @@ (func (export "call") ;; copy code hash to contract memory - (call $seal_input (i32.const 264) (i32.const 304)) + (call $seal_input (i32.const 308) (i32.const 304)) (call $assert (i32.eq (i32.load (i32.const 304)) - (i32.const 32) + (i32.const 64) ) ) - (call $seal_restore_to ;; Pointer and length of the encoded dest buffer. - (i32.const 256) - (i32.const 8) + (i32.const 340) + (i32.const 32) ;; Pointer and length of the encoded code hash buffer - (i32.const 264) + (i32.const 308) (i32.const 32) ;; Pointer and length of the encoded rent_allowance buffer (i32.const 296) @@ -65,14 +64,12 @@ ;; Buffer that has ACL storage keys. (data (i32.const 100) "\01") - ;; Address of bob - (data (i32.const 256) "\02\00\00\00\00\00\00\00") - - ;; [264, 296) Code hash of SET_RENT (copied here by seal_input) - ;; [296, 304) Rent allowance (data (i32.const 296) "\32\00\00\00\00\00\00\00") - ;; [304, 308) Size of SET_RENT buffer - (data (i32.const 304) "\20") + ;; [304, 308) Size of the buffer that holds code_hash + addr + (data (i32.const 304) "\40") + + ;; [308, 340) code hash of bob (copied by seal_input) + ;; [340, 372) addr of bob (copied by seal_input) ) diff --git a/frame/contracts/fixtures/self_destruct.wat b/frame/contracts/fixtures/self_destruct.wat index 6898e746b0836719bd711b0e67949ee0906702b0..b8a37306e20110bf43087a511cdf241683da6f43 100644 --- a/frame/contracts/fixtures/self_destruct.wat +++ b/frame/contracts/fixtures/self_destruct.wat @@ -5,20 +5,23 @@ (import "seal0" "seal_terminate" (func $seal_terminate (param i32 i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) reserved for $seal_address output + ;; [0, 32) reserved for $seal_address output - ;; [8, 16) length of the buffer - (data (i32.const 8) "\08") + ;; [32, 36) length of the buffer + (data (i32.const 32) "\20") - ;; [16, 24) Address of django - (data (i32.const 16) "\04\00\00\00\00\00\00\00") + ;; [36, 68) Address of django + (data (i32.const 36) + "\04\04\04\04\04\04\04\04\04\04\04\04\04\04\04\04" + "\04\04\04\04\04\04\04\04\04\04\04\04\04\04\04\04" + ) - ;; [24, 32) reserved for output of $seal_input + ;; [68, 72) reserved for output of $seal_input - ;; [32, 36) length of the buffer - (data (i32.const 32) "\04") + ;; [72, 76) length of the buffer + (data (i32.const 72) "\04") - ;; [36, inf) zero initialized + ;; [76, inf) zero initialized (func $assert (param i32) (block $ok @@ -36,16 +39,16 @@ ;; This should trap instead of self-destructing since a contract cannot be removed live in ;; the 
execution stack cannot be removed. If the recursive call traps, then trap here as ;; well. - (call $seal_input (i32.const 24) (i32.const 32)) - (if (i32.load (i32.const 32)) + (call $seal_input (i32.const 68) (i32.const 72)) + (if (i32.load (i32.const 72)) (then - (call $seal_address (i32.const 0) (i32.const 8)) + (call $seal_address (i32.const 0) (i32.const 32)) ;; Expect address to be 8 bytes. (call $assert (i32.eq - (i32.load (i32.const 8)) - (i32.const 8) + (i32.load (i32.const 32)) + (i32.const 32) ) ) @@ -54,9 +57,9 @@ (i32.eq (call $seal_call (i32.const 0) ;; Pointer to own address - (i32.const 8) ;; Length of own address + (i32.const 32) ;; Length of own address (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 36) ;; Pointer to the buffer with value to transfer + (i32.const 76) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer (i32.const 0) ;; Pointer to input data buffer address (i32.const 0) ;; Length of input data buffer @@ -70,8 +73,8 @@ (else ;; Try to terminate and give balance to django. (call $seal_terminate - (i32.const 16) ;; Pointer to beneficiary address - (i32.const 8) ;; Length of beneficiary address + (i32.const 36) ;; Pointer to beneficiary address + (i32.const 32) ;; Length of beneficiary address ) (unreachable) ;; seal_terminate never returns ) diff --git a/frame/contracts/fixtures/self_destructing_constructor.wat b/frame/contracts/fixtures/self_destructing_constructor.wat index ab8c289f1b5640cf893ee79b5fa20745234fb878..85fce511e21b96fcb3b7c15fce5ad765c6e405d7 100644 --- a/frame/contracts/fixtures/self_destructing_constructor.wat +++ b/frame/contracts/fixtures/self_destructing_constructor.wat @@ -15,7 +15,7 @@ ;; Self-destruct by sending full balance to the 0 address. 
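Across these fixture updates the recurring change is the same: every hard-coded account buffer and address length grows from 8 to 32 bytes, because the contracts test environment now works with 32-byte account ids rather than `u64` indices. Assuming an `AccountId32`-style account type on the Rust side of the tests (an assumption about the test runtime, not something shown in these hunks), the well-known fixture accounts would look roughly like this:

use sp_runtime::AccountId32;

// DJANGO used to be the 8-byte index `4u64`; as a 32-byte id the fixtures spell it
// out as thirty-two `\04` bytes, matching the data sections above.
fn django() -> AccountId32 {
    AccountId32::new([0x04; 32])
}

// The zero "beneficiary" address used by the self-destructing fixtures.
fn zero_address() -> AccountId32 {
    AccountId32::new([0x00; 32])
}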
(call $seal_terminate (i32.const 0) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address + (i32.const 32) ;; Length of destination address ) ) diff --git a/frame/contracts/fixtures/set_rent.wat b/frame/contracts/fixtures/set_rent.wat index a09d3dc4bd47aef14fe61c3108adbeaeedc67ba4..1c6b512cc77acfd08f73eb6a29e01908885a462c 100644 --- a/frame/contracts/fixtures/set_rent.wat +++ b/frame/contracts/fixtures/set_rent.wat @@ -26,7 +26,7 @@ (func $call_2 (call $assert (i32.eq - (call $seal_transfer (i32.const 68) (i32.const 8) (i32.const 76) (i32.const 8)) + (call $seal_transfer (i32.const 136) (i32.const 32) (i32.const 100) (i32.const 8)) (i32.const 0) ) ) @@ -47,10 +47,11 @@ ;; Dispatch the call according to input size (func (export "call") (local $input_size i32) - (i32.store (i32.const 64) (i32.const 64)) - (call $seal_input (i32.const 1024) (i32.const 64)) + ;; 4 byte i32 for br_table followed by 32 byte destination for transfer + (i32.store (i32.const 128) (i32.const 36)) + (call $seal_input (i32.const 132) (i32.const 128)) (set_local $input_size - (i32.load (i32.const 64)) + (i32.load (i32.const 132)) ) (block $IF_ELSE (block $IF_2 @@ -81,25 +82,24 @@ (i32.const 0) (i32.const 4) ) + (i32.store (i32.const 128) (i32.const 64)) (call $seal_input - (i32.const 0) - (i32.const 64) + (i32.const 104) + (i32.const 100) ) (call $seal_set_rent_allowance - (i32.const 0) - (i32.load (i32.const 64)) + (i32.const 104) + (i32.load (i32.const 128)) ) ) ;; Encoding of 10 in balance (data (i32.const 0) "\28") - ;; Size of the buffer at address 0 - (data (i32.const 64) "\40") + ;; encoding of 50 balance + (data (i32.const 100) "\32") - ;; encoding of Charlies's account id - (data (i32.const 68) "\03") + ;; [128, 132) size of seal input buffer - ;; encoding of 50 balance - (data (i32.const 76) "\32") + ;; [132, inf) output buffer for seal input ) diff --git a/frame/contracts/fixtures/transfer_return_code.wat b/frame/contracts/fixtures/transfer_return_code.wat index 7a1bec9adf38c5e71f5ce50aa6b02bb5f329009a..50098851dcf81ab3250b4736c559a6b391eda455 100644 --- a/frame/contracts/fixtures/transfer_return_code.wat +++ b/frame/contracts/fixtures/transfer_return_code.wat @@ -5,27 +5,30 @@ (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) zero-adress - (data (i32.const 0) "\00\00\00\00\00\00\00\00") + ;; [0, 32) zero-adress + (data (i32.const 0) + "\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00" + "\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00" + ) - ;; [8, 16) 100 balance - (data (i32.const 8) "\64\00\00\00\00\00\00\00") + ;; [32, 40) 100 balance + (data (i32.const 32) "\64\00\00\00\00\00\00\00") - ;; [16, 20) here we store the return code of the transfer + ;; [40, 44) here we store the return code of the transfer (func (export "deploy")) (func (export "call") (i32.store - (i32.const 16) + (i32.const 40) (call $seal_transfer (i32.const 0) ;; ptr to destination address - (i32.const 8) ;; length of destination address - (i32.const 8) ;; ptr to value to transfer + (i32.const 32) ;; length of destination address + (i32.const 32) ;; ptr to value to transfer (i32.const 8) ;; length of value to transfer ) ) ;; exit with success and take transfer return code to the output buffer - (call $seal_return (i32.const 0) (i32.const 16) (i32.const 4)) + (call $seal_return (i32.const 0) (i32.const 40) (i32.const 4)) ) ) diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index 
cb5052042aab23bd4f90d1c70dc102d1e3e1f0b0..847be9b434cba720e6332a162b5311284ee01f81 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -24,11 +24,12 @@ //! we define this simple definition of a contract that can be passed to `create_code` that //! compiles it down into a `WasmModule` that can be used as a contract's code. -use crate::Trait; +use crate::Config; use crate::Module as Contracts; use parity_wasm::elements::{Instruction, Instructions, FuncBody, ValueType, BlockType}; use pwasm_utils::stack_height::inject_limiter; +use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::Hash; use sp_sandbox::{EnvironmentDefinitionBuilder, Memory}; use sp_std::{prelude::*, convert::TryFrom}; @@ -86,7 +87,11 @@ pub struct ImportedMemory { } impl ImportedMemory { - pub fn max() -> Self { + pub fn max() -> Self + where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, + { let pages = max_pages::(); Self { min_pages: pages, max_pages: pages } } @@ -100,13 +105,17 @@ pub struct ImportedFunction { /// A wasm module ready to be put on chain with `put_code`. #[derive(Clone)] -pub struct WasmModule { +pub struct WasmModule { pub code: Vec, pub hash: ::Output, memory: Option, } -impl From for WasmModule { +impl From for WasmModule +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ fn from(def: ModuleDefinition) -> Self { // internal functions start at that offset. let func_offset = u32::try_from(def.imported_functions.len()).unwrap(); @@ -216,7 +225,11 @@ impl From for WasmModule { } } -impl WasmModule { +impl WasmModule +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ /// Creates a wasm module with an empty `call` and `deploy` function and nothing else. pub fn dummy() -> Self { ModuleDefinition::default().into() @@ -470,6 +483,10 @@ pub mod body { } /// The maximum amount of pages any contract is allowed to have according to the current `Schedule`. -pub fn max_pages() -> u32 { +pub fn max_pages() -> u32 +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ Contracts::::current_schedule().limits.memory_pages } diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index dd9e89d6f35ab561065cc75f459803c47644fd0f..4bdd279eb8b2cf856394d55ff366b547d0499edb 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -25,7 +25,9 @@ mod sandbox; use crate::{ *, Module as Contracts, exec::StorageKey, + rent::Rent, schedule::{API_BENCHMARK_BATCH_SIZE, INSTR_BENCHMARK_BATCH_SIZE}, + storage::Storage, }; use self::{ code::{ @@ -48,7 +50,7 @@ const API_BENCHMARK_BATCHES: u32 = 20; const INSTR_BENCHMARK_BATCHES: u32 = 1; /// An instantiated and deployed contract. -struct Contract { +struct Contract { caller: T::AccountId, account_id: T::AccountId, addr: ::Source, @@ -70,12 +72,16 @@ impl Endow { /// The maximum amount of balance a caller can transfer without being brought below /// the existential deposit. This assumes that every caller is funded with the amount /// returned by `caller_funding`. - fn max() -> BalanceOf { + fn max() -> BalanceOf { caller_funding::().saturating_sub(T::Currency::minimum_balance()) } } -impl Contract { +impl Contract +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ /// Create new contract and use a default account id as instantiator. 
fn new( module: WasmModule, @@ -109,7 +115,7 @@ impl Contract { // storage_size cannot be zero because otherwise a contract that is just above // the subsistence threshold does not pay rent given a large enough subsistence // threshold. But we need rent payments to occur in order to benchmark for worst cases. - let storage_size = Config::::subsistence_threshold_uncached() + let storage_size = ConfigCache::::subsistence_threshold_uncached() .checked_div(&T::RentDepositOffset::get()) .unwrap_or_else(Zero::zero); @@ -123,7 +129,8 @@ impl Contract { Endow::Max => (0u32.into(), Endow::max::()), }; T::Currency::make_free_balance_be(&caller, caller_funding::()); - let addr = T::DetermineContractAddress::contract_address_for(&module.hash, &data, &caller); + let salt = vec![0xff]; + let addr = Contracts::::contract_address(&caller, &module.hash, &salt); // The default block number is zero. The benchmarking system bumps the block number // to one for the benchmarking closure when it is set to zero. In order to prevent this @@ -139,6 +146,7 @@ impl Contract { Weight::max_value(), module.hash, data, + salt, )?; let result = Contract { @@ -160,7 +168,7 @@ impl Contract { fn store(&self, items: &Vec<(StorageKey, Vec)>) -> Result<(), &'static str> { let info = self.alive_info()?; for item in items { - crate::storage::write_contract_storage::( + Storage::::write( &self.account_id, &info.trie_id, &item.0, @@ -192,7 +200,7 @@ impl Contract { /// Get the block number when this contract will be evicted. Returns an error when /// the rent collection won't happen because the contract has to much endowment. fn eviction_at(&self) -> Result { - let projection = crate::rent::compute_rent_projection::(&self.account_id) + let projection = Rent::::compute_projection(&self.account_id) .map_err(|_| "Invalid acc for rent")?; match projection { RentProjection::EvictionAt(at) => Ok(at), @@ -204,14 +212,18 @@ impl Contract { /// A `Contract` that was evicted after accumulating some storage. /// /// This is used to benchmark contract resurrection. -struct Tombstone { +struct Tombstone { /// The contract that was evicted. contract: Contract, /// The storage the contract held when it was avicted. storage: Vec<(StorageKey, Vec)>, } -impl Tombstone { +impl Tombstone +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ /// Create and evict a new contract with the supplied storage item count and size each. fn new(stor_num: u32, stor_size: u32) -> Result { let contract = Contract::::new(WasmModule::dummy(), vec![], Endow::CollectRent)?; @@ -220,7 +232,7 @@ impl Tombstone { System::::set_block_number( contract.eviction_at()? + T::SignedClaimHandicap::get() + 5u32.into() ); - crate::rent::collect_rent::(&contract.account_id); + Rent::::collect(&contract.account_id); contract.ensure_tombstone()?; Ok(Tombstone { @@ -231,7 +243,7 @@ impl Tombstone { } /// Generate `stor_num` storage items. Each has the size `stor_size`. -fn create_storage( +fn create_storage( stor_num: u32, stor_size: u32 ) -> Result)>, &'static str> { @@ -245,11 +257,16 @@ fn create_storage( } /// The funding that each account that either calls or instantiates contracts is funded with. -fn caller_funding() -> BalanceOf { +fn caller_funding() -> BalanceOf { BalanceOf::::max_value() / 2u32.into() } benchmarks! { + where_clause { where + T::AccountId: UncheckedFrom, + T::AccountId: AsRef<[u8]>, + } + _ { } @@ -276,17 +293,20 @@ benchmarks! 
{ // The size of the input data influences the runtime because it is hashed in order to determine // the contract address. // `n`: Size of the data passed to constructor in kilobytes. + // `s`: Size of the salt in kilobytes. instantiate { let n in 0 .. code::max_pages::() * 64; + let s in 0 .. code::max_pages::() * 64; let data = vec![42u8; (n * 1024) as usize]; - let endowment = Config::::subsistence_threshold_uncached(); + let salt = vec![42u8; (s * 1024) as usize]; + let endowment = ConfigCache::::subsistence_threshold_uncached(); let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, caller_funding::()); let WasmModule { code, hash, .. } = WasmModule::::dummy_with_mem(); let origin = RawOrigin::Signed(caller.clone()); - let addr = T::DetermineContractAddress::contract_address_for(&hash, &data, &caller); + let addr = Contracts::::contract_address(&caller, &hash, &salt); Contracts::::put_code_raw(code)?; - }: _(origin, endowment, Weight::max_value(), hash, data) + }: _(origin, endowment, Weight::max_value(), hash, data, salt) verify { // endowment was removed from the caller assert_eq!(T::Currency::free_balance(&caller), caller_funding::() - endowment); @@ -354,7 +374,7 @@ benchmarks! { // the caller should get the reward for being a good snitch assert_eq!( T::Currency::free_balance(&instance.caller), - caller_funding::() - instance.endowment + ::SurchargeReward::get(), + caller_funding::() - instance.endowment + ::SurchargeReward::get(), ); } @@ -1000,7 +1020,7 @@ benchmarks! { let instance = Contract::::new(code, vec![], Endow::Max)?; let trie_id = instance.alive_info()?.trie_id; for key in keys { - crate::storage::write_contract_storage::( + Storage::::write( &instance.account_id, &trie_id, key.as_slice().try_into().map_err(|e| "Key has wrong length")?, @@ -1045,7 +1065,7 @@ benchmarks! { let instance = Contract::::new(code, vec![], Endow::Max)?; let trie_id = instance.alive_info()?.trie_id; for key in keys { - crate::storage::write_contract_storage::( + Storage::::write( &instance.account_id, &trie_id, key.as_slice().try_into().map_err(|e| "Key has wrong length")?, @@ -1089,7 +1109,7 @@ benchmarks! { }); let instance = Contract::::new(code, vec![], Endow::Max)?; let trie_id = instance.alive_info()?.trie_id; - crate::storage::write_contract_storage::( + Storage::::write( &instance.account_id, &trie_id, key.as_slice().try_into().map_err(|e| "Key has wrong length")?, @@ -1107,7 +1127,7 @@ benchmarks! { .collect::>(); let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); let account_bytes = accounts.iter().flat_map(|x| x.encode()).collect(); - let value = Config::::subsistence_threshold_uncached(); + let value = ConfigCache::::subsistence_threshold_uncached(); assert!(value > 0u32.into()); let value_bytes = value.encode(); let value_len = value_bytes.len(); @@ -1314,7 +1334,7 @@ benchmarks! { let hash_len = hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); let hashes_bytes = hashes.iter().flat_map(|x| x.encode()).collect::>(); let hashes_len = hashes_bytes.len(); - let value = Config::::subsistence_threshold_uncached(); + let value = ConfigCache::::subsistence_threshold_uncached(); assert!(value > 0u32.into()); let value_bytes = value.encode(); let value_len = value_bytes.len(); @@ -1341,7 +1361,9 @@ benchmarks! { ValueType::I32, ValueType::I32, ValueType::I32, - ValueType::I32 + ValueType::I32, + ValueType::I32, + ValueType::I32, ], return_type: Some(ValueType::I32), }], @@ -1371,6 +1393,8 @@ benchmarks! 
{ Regular(Instruction::I32Const(addr_len_offset as i32)), // address_len_ptr Regular(Instruction::I32Const(u32::max_value() as i32)), // output_ptr Regular(Instruction::I32Const(0)), // output_len_ptr + Regular(Instruction::I32Const(0)), // salt_ptr + Regular(Instruction::I32Const(0)), // salt_ptr_len Regular(Instruction::Call(0)), Regular(Instruction::Drop), ])), @@ -1381,8 +1405,8 @@ benchmarks! { let callee = instance.addr.clone(); let addresses = hashes .iter() - .map(|hash| T::DetermineContractAddress::contract_address_for( - hash, &[], &instance.account_id + .map(|hash| Contracts::::contract_address( + &instance.account_id, hash, &[], )) .collect::>(); @@ -1398,9 +1422,10 @@ benchmarks! { } } - seal_instantiate_per_input_output_kb { + seal_instantiate_per_input_output_salt_kb { let i in 0 .. (code::max_pages::() - 1) * 64; let o in 0 .. (code::max_pages::() - 1) * 64; + let s in 0 .. (code::max_pages::() - 1) * 64; let callee_code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { @@ -1429,7 +1454,7 @@ benchmarks! { let input_len = inputs.get(0).map(|x| x.len()).unwrap_or(0); let input_bytes = inputs.iter().cloned().flatten().collect::>(); let inputs_len = input_bytes.len(); - let value = Config::::subsistence_threshold_uncached(); + let value = ConfigCache::::subsistence_threshold_uncached(); assert!(value > 0u32.into()); let value_bytes = value.encode(); let value_len = value_bytes.len(); @@ -1458,7 +1483,9 @@ benchmarks! { ValueType::I32, ValueType::I32, ValueType::I32, - ValueType::I32 + ValueType::I32, + ValueType::I32, + ValueType::I32, ], return_type: Some(ValueType::I32), }], @@ -1496,6 +1523,8 @@ benchmarks! { Regular(Instruction::I32Const(addr_len_offset as i32)), // address_len_ptr Regular(Instruction::I32Const(output_offset as i32)), // output_ptr Regular(Instruction::I32Const(output_len_offset as i32)), // output_len_ptr + Counter(input_offset as u32, input_len as u32), // salt_ptr + Regular(Instruction::I32Const((s * 1024).max(input_len as u32) as i32)), // salt_len Regular(Instruction::Call(0)), Regular(Instruction::I32Eqz), Regular(Instruction::If(BlockType::NoResult)), @@ -2401,6 +2430,8 @@ mod tests { create_test!(seal_transfer); create_test!(seal_call); create_test!(seal_call_per_transfer_input_output_kb); + create_test!(seal_instantiate); + create_test!(seal_instantiate_per_input_output_salt_kb); create_test!(seal_clear_storage); create_test!(seal_hash_sha2_256); create_test!(seal_hash_sha2_256_per_kb); diff --git a/frame/contracts/src/benchmarking/sandbox.rs b/frame/contracts/src/benchmarking/sandbox.rs index 1d93db19ee59d8d1a647488dd5d79330b3efd44c..61277ebce6780832930bd91d8d079cf6498bca26 100644 --- a/frame/contracts/src/benchmarking/sandbox.rs +++ b/frame/contracts/src/benchmarking/sandbox.rs @@ -19,8 +19,11 @@ ///! sandbox to execute the wasm code. This is because we do not need the full ///! environment that provides the seal interface as imported functions. -use super::code::WasmModule; -use super::Trait; +use super::{ + Config, + code::WasmModule, +}; +use sp_core::crypto::UncheckedFrom; use sp_sandbox::{EnvironmentDefinitionBuilder, Instance, Memory}; /// Minimal execution environment without any exported functions. 
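The benchmarking changes in this region all follow from the new deterministic address scheme: the caller supplies an arbitrary `salt`, and the resulting contract address is a function of the deploying account, the code hash and that salt, so a benchmark or test can compute the address before dispatching `instantiate`. A sketch of that usage, mirroring the `contract_address` and six-argument `instantiate` calls visible in the hunks above; the function itself is illustrative, not part of this change, and assumes it sits inside `frame/contracts/src/benchmarking` so crate-internal items are in scope:

use crate::{CodeHash, Config, ConfigCache, Module as Contracts};
use frame_support::weights::Weight;
use frame_system::RawOrigin;
use sp_core::crypto::UncheckedFrom;

fn deploy_with_salt<T: Config>(
    caller: T::AccountId,
    code_hash: CodeHash<T>,
) -> Result<T::AccountId, &'static str>
where
    T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>,
{
    let data = vec![];      // constructor input
    let salt = vec![0xff];  // any byte string; a different salt yields a different address
    // The address is deterministic, so it can be computed before dispatching...
    let addr = Contracts::<T>::contract_address(&caller, &code_hash, &salt);
    // ...and the instantiate dispatchable now takes the salt as its last argument.
    Contracts::<T>::instantiate(
        RawOrigin::Signed(caller).into(),
        ConfigCache::<T>::subsistence_threshold_uncached(),
        Weight::max_value(),
        code_hash,
        data,
        salt,
    ).map_err(|_| "instantiate failed")?;
    Ok(addr)
}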
@@ -36,7 +39,11 @@ impl Sandbox { } } -impl From<&WasmModule> for Sandbox { +impl From<&WasmModule> for Sandbox +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ /// Creates an instance from the supplied module and supplies as much memory /// to the instance as the module declares as imported. fn from(module: &WasmModule) -> Self { diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index f93f262d821e959f09e9f1768c531fbcaa80a45a..8577d04452fa8659abd095984a1fa8daf8507e94 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -15,10 +15,11 @@ // along with Substrate. If not, see . use crate::{ - CodeHash, Config, ContractAddressFor, Event, RawEvent, Trait, - TrieId, BalanceOf, ContractInfo, TrieIdGenerator, - gas::GasMeter, rent, storage, Error, ContractInfoOf + CodeHash, ConfigCache, Event, RawEvent, Config, Module as Contracts, + TrieId, BalanceOf, ContractInfo, gas::GasMeter, rent::Rent, storage::{self, Storage}, + Error, ContractInfoOf }; +use sp_core::crypto::UncheckedFrom; use sp_std::prelude::*; use sp_runtime::traits::{Bounded, Zero, Convert, Saturating}; use frame_support::{ @@ -29,14 +30,14 @@ use frame_support::{ }; use pallet_contracts_primitives::{ErrorOrigin, ExecError, ExecReturnValue, ExecResult, ReturnFlags}; -pub type AccountIdOf = ::AccountId; -pub type MomentOf = <::Time as Time>::Moment; -pub type SeedOf = ::Hash; -pub type BlockNumberOf = ::BlockNumber; +pub type AccountIdOf = ::AccountId; +pub type MomentOf = <::Time as Time>::Moment; +pub type SeedOf = ::Hash; +pub type BlockNumberOf = ::BlockNumber; pub type StorageKey = [u8; 32]; /// A type that represents a topic of an event. At the moment a hash is used. -pub type TopicOf = ::Hash; +pub type TopicOf = ::Hash; /// Describes whether we deal with a contract or a plain account. pub enum TransactorKind { @@ -53,7 +54,7 @@ pub enum TransactorKind { /// This interface is specialized to an account of the executing code, so all /// operations are implicitly performed on that account. pub trait Ext { - type T: Trait; + type T: Config; /// Returns the storage entry of the executing account by the given `key`. /// @@ -75,6 +76,7 @@ pub trait Ext { value: BalanceOf, gas_meter: &mut GasMeter, input_data: Vec, + salt: &[u8], ) -> Result<(AccountIdOf, ExecReturnValue), ExecError>; /// Transfer some amount of funds into the specified account. @@ -118,7 +120,7 @@ pub trait Ext { code_hash: CodeHash, rent_allowance: BalanceOf, delta: Vec, - ) -> Result<(), &'static str>; + ) -> Result<(), DispatchError>; /// Returns a reference to the account id of the caller. fn caller(&self) -> &AccountIdOf; @@ -169,7 +171,7 @@ pub trait Ext { /// Loader is a companion of the `Vm` trait. It loads an appropriate abstract /// executable to be executed by an accompanying `Vm` implementation. -pub trait Loader { +pub trait Loader { type Executable; /// Load the initializer portion of the code specified by the `code_hash`. This @@ -188,7 +190,7 @@ pub trait Loader { /// /// Execution of code can end by either implicit termination (that is, reached the end of /// executable), explicit termination via returning a buffer or termination due to a trap. 
-pub trait Vm { +pub trait Vm { type Executable; fn execute>( @@ -200,12 +202,12 @@ pub trait Vm { ) -> ExecResult; } -pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { +pub struct ExecutionContext<'a, T: Config + 'a, V, L> { pub caller: Option<&'a ExecutionContext<'a, T, V, L>>, pub self_account: T::AccountId, pub self_trie_id: Option, pub depth: usize, - pub config: &'a Config, + pub config: &'a ConfigCache, pub vm: &'a V, pub loader: &'a L, pub timestamp: MomentOf, @@ -214,7 +216,8 @@ pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { impl<'a, T, E, V, L> ExecutionContext<'a, T, V, L> where - T: Trait, + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, L: Loader, V: Vm, { @@ -222,7 +225,7 @@ where /// /// The specified `origin` address will be used as `sender` for. The `origin` must be a regular /// account (not a contract). - pub fn top_level(origin: T::AccountId, cfg: &'a Config, vm: &'a V, loader: &'a L) -> Self { + pub fn top_level(origin: T::AccountId, cfg: &'a ConfigCache, vm: &'a V, loader: &'a L) -> Self { ExecutionContext { caller: None, self_trie_id: None, @@ -264,12 +267,12 @@ where Err(Error::::MaxCallDepthReached)? } - // Assumption: `collect_rent` doesn't collide with overlay because - // `collect_rent` will be done on first call and destination contract and balance + // Assumption: `collect` doesn't collide with overlay because + // `collect` will be done on first call and destination contract and balance // cannot be changed before the first call // We do not allow 'calling' plain accounts. For transfering value // `seal_transfer` must be used. - let contract = if let Some(ContractInfo::Alive(info)) = rent::collect_rent::(&dest) { + let contract = if let Some(ContractInfo::Alive(info)) = Rent::::collect(&dest) { info } else { Err(Error::::NotCallable)? @@ -308,6 +311,7 @@ where gas_meter: &mut GasMeter, code_hash: &CodeHash, input_data: Vec, + salt: &[u8], ) -> Result<(T::AccountId, ExecReturnValue), ExecError> { if self.depth == self.config.max_depth as usize { Err(Error::::MaxCallDepthReached)? @@ -315,19 +319,15 @@ where let transactor_kind = self.transactor_kind(); let caller = self.self_account.clone(); - let dest = T::DetermineContractAddress::contract_address_for( - code_hash, - &input_data, - &caller, - ); + let dest = Contracts::::contract_address(&caller, code_hash, salt); // TrieId has not been generated yet and storage is empty since contract is new. // // Generate it now. - let dest_trie_id = ::TrieIdGenerator::trie_id(&dest); + let dest_trie_id = Storage::::generate_trie_id(&dest); let output = self.with_nested_context(dest.clone(), dest_trie_id, |nested| { - storage::place_contract::( + Storage::::place_contract( &dest, nested .self_trie_id @@ -437,14 +437,17 @@ enum TransferCause { /// is specified as `Terminate`. Otherwise, any transfer that would bring the sender below the /// subsistence threshold (for contracts) or the existential deposit (for plain accounts) /// results in an error. 
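The recurring `T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>` bound in this file and in the benchmarking code exists because the pallet now derives contract addresses itself: the deployer, code hash and salt are combined and hashed, and the resulting `T::Hash` is converted into an account id, which requires the account type to be constructible from a hash and viewable as raw bytes. A hedged sketch of such a derivation follows; it illustrates why the bound is needed and is not necessarily the exact byte layout the pallet uses:

use sp_core::crypto::UncheckedFrom;
use sp_runtime::traits::Hash;

// Illustrative address derivation: hash(deployer ++ code_hash ++ salt) -> AccountId.
fn derive_address<T: frame_system::Config>(
    deployer: &T::AccountId,
    code_hash: &T::Hash,
    salt: &[u8],
) -> T::AccountId
where
    T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>,
{
    let mut buf = Vec::new();
    buf.extend_from_slice(deployer.as_ref());
    buf.extend_from_slice(code_hash.as_ref());
    buf.extend_from_slice(salt);
    UncheckedFrom::unchecked_from(T::Hashing::hash(&buf))
}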
-fn transfer<'a, T: Trait, V: Vm, L: Loader>( +fn transfer<'a, T: Config, V: Vm, L: Loader>( cause: TransferCause, origin: TransactorKind, transactor: &T::AccountId, dest: &T::AccountId, value: BalanceOf, ctx: &mut ExecutionContext<'a, T, V, L>, -) -> Result<(), DispatchError> { +) -> Result<(), DispatchError> +where + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ use self::TransferCause::*; use self::TransactorKind::*; @@ -480,7 +483,7 @@ fn transfer<'a, T: Trait, V: Vm, L: Loader>( /// implies that the control won't be returned to the contract anymore, but there is still some code /// on the path of the return from that call context. Therefore, care must be taken in these /// situations. -struct CallContext<'a, 'b: 'a, T: Trait + 'b, V: Vm + 'b, L: Loader> { +struct CallContext<'a, 'b: 'a, T: Config + 'b, V: Vm + 'b, L: Loader> { ctx: &'a mut ExecutionContext<'b, T, V, L>, caller: T::AccountId, value_transferred: BalanceOf, @@ -490,7 +493,8 @@ struct CallContext<'a, 'b: 'a, T: Trait + 'b, V: Vm + 'b, L: Loader> { impl<'a, 'b: 'a, T, E, V, L> Ext for CallContext<'a, 'b, T, V, L> where - T: Trait + 'b, + T: Config + 'b, + T::AccountId: UncheckedFrom + AsRef<[u8]>, V: Vm, L: Loader, { @@ -503,7 +507,7 @@ where expect can't fail;\ qed", ); - storage::read_contract_storage(trie_id, key) + Storage::::read(trie_id, key) } fn set_storage(&mut self, key: StorageKey, value: Option>) { @@ -514,12 +518,12 @@ where qed", ); if let Err(storage::ContractAbsentError) = - storage::write_contract_storage::(&self.ctx.self_account, trie_id, &key, value) + Storage::::write(&self.ctx.self_account, trie_id, &key, value) { panic!( "the contract must be in the alive state within the `CallContext`;\ the contract cannot be absent in storage; - write_contract_storage cannot return `None`; + write cannot return `None`; qed" ); } @@ -531,8 +535,9 @@ where endowment: BalanceOf, gas_meter: &mut GasMeter, input_data: Vec, + salt: &[u8], ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { - self.ctx.instantiate(endowment, gas_meter, code_hash, input_data) + self.ctx.instantiate(endowment, gas_meter, code_hash, input_data, salt) } fn transfer( @@ -558,9 +563,7 @@ where let value = T::Currency::free_balance(&self_id); if let Some(caller_ctx) = self.ctx.caller { if caller_ctx.is_live(&self_id) { - return Err(DispatchError::Other( - "Cannot terminate a contract that is present on the call stack", - )); + return Err(Error::::ReentranceDenied.into()); } } transfer( @@ -576,7 +579,7 @@ where a contract has a trie id;\ this can't be None; qed", ); - storage::destroy_contract::(&self_id, self_trie_id); + Storage::::destroy_contract(&self_id, self_trie_id); Ok(()) } @@ -596,16 +599,14 @@ where code_hash: CodeHash, rent_allowance: BalanceOf, delta: Vec, - ) -> Result<(), &'static str> { + ) -> Result<(), DispatchError> { if let Some(caller_ctx) = self.ctx.caller { if caller_ctx.is_live(&self.ctx.self_account) { - return Err( - "Cannot perform restoration of a contract that is present on the call stack", - ); + return Err(Error::::ReentranceDenied.into()); } } - let result = crate::rent::restore_to::( + let result = Rent::::restore_to( self.ctx.self_account.clone(), dest.clone(), code_hash.clone(), @@ -667,7 +668,7 @@ where fn set_rent_allowance(&mut self, rent_allowance: BalanceOf) { if let Err(storage::ContractAbsentError) = - storage::set_rent_allowance::(&self.ctx.self_account, rent_allowance) + Storage::::set_rent_allowance(&self.ctx.self_account, rent_allowance) { panic!( "`self_account` points to an alive contract within 
the `CallContext`; @@ -677,7 +678,7 @@ where } fn rent_allowance(&self) -> BalanceOf { - storage::rent_allowance::(&self.ctx.self_account) + Storage::::rent_allowance(&self.ctx.self_account) .unwrap_or_else(|_| >::max_value()) // Must never be triggered actually } @@ -692,13 +693,13 @@ where } } -fn deposit_event( +fn deposit_event( topics: Vec, event: Event, ) { >::deposit_event_indexed( &*topics, - ::Event::from(event).into(), + ::Event::from(event).into(), ) } @@ -711,23 +712,21 @@ fn deposit_event( mod tests { use super::{ BalanceOf, Event, ExecResult, ExecutionContext, Ext, Loader, - RawEvent, Vm, ReturnFlags, ExecError, ErrorOrigin + RawEvent, Vm, ReturnFlags, ExecError, ErrorOrigin, AccountIdOf, }; use crate::{ gas::GasMeter, tests::{ExtBuilder, Test, MetaEvent}, - exec::ExecReturnValue, CodeHash, Config, + exec::ExecReturnValue, CodeHash, ConfigCache, gas::Gas, - storage, Error + storage::Storage, + tests::{ALICE, BOB, CHARLIE}, + Error, }; use crate::tests::test_utils::{place_contract, set_balance, get_balance}; use sp_runtime::DispatchError; use assert_matches::assert_matches; use std::{cell::RefCell, collections::HashMap, marker::PhantomData, rc::Rc}; - const ALICE: u64 = 1; - const BOB: u64 = 2; - const CHARLIE: u64 = 3; - const GAS_LIMIT: Gas = 10_000_000_000; fn events() -> Vec> { @@ -770,7 +769,7 @@ mod tests { fn insert(&mut self, f: impl Fn(MockCtx) -> ExecResult + 'a) -> CodeHash { // Generate code hashes as monotonically increasing values. - let code_hash = ::Hash::from_low_u64_be(self.counter); + let code_hash = ::Hash::from_low_u64_be(self.counter); self.counter += 1; self.map.insert(code_hash, MockExecutable::new(f)); @@ -844,7 +843,7 @@ mod tests { }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); + let cfg = ConfigCache::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); place_contract(&BOB, exec_ch); @@ -868,8 +867,8 @@ mod tests { let loader = MockLoader::empty(); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + let cfg = ConfigCache::preload(); + let mut ctx = ExecutionContext::top_level(origin.clone(), &cfg, &vm, &loader); set_balance(&origin, 100); set_balance(&dest, 0); @@ -901,14 +900,14 @@ mod tests { ); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + let cfg = ConfigCache::preload(); + let mut ctx = ExecutionContext::top_level(origin.clone(), &cfg, &vm, &loader); place_contract(&BOB, return_ch); set_balance(&origin, 100); set_balance(&dest, 0); let output = ctx.call( - dest, + dest.clone(), 55, &mut GasMeter::::new(GAS_LIMIT), vec![], @@ -931,8 +930,8 @@ mod tests { let loader = MockLoader::empty(); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + let cfg = ConfigCache::preload(); + let mut ctx = ExecutionContext::top_level(origin.clone(), &cfg, &vm, &loader); set_balance(&origin, 0); let result = super::transfer( @@ -967,7 +966,7 @@ mod tests { ); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); + let cfg = ConfigCache::preload(); let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); place_contract(&BOB, return_ch); @@ -998,7 +997,7 @@ mod tests { ); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); + let cfg 
= ConfigCache::preload(); let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); place_contract(&BOB, return_ch); @@ -1026,7 +1025,7 @@ mod tests { // This one tests passing the input data into a contract via call. ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); + let cfg = ConfigCache::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); place_contract(&BOB, input_data_ch); @@ -1051,7 +1050,7 @@ mod tests { // This one tests passing the input data into a contract via instantiate. ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); + let cfg = ConfigCache::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); set_balance(&ALICE, 100); @@ -1061,6 +1060,7 @@ mod tests { &mut GasMeter::::new(GAS_LIMIT), &input_data_ch, vec![1, 2, 3, 4], + &[], ); assert_matches!(result, Ok(_)); }); @@ -1097,7 +1097,7 @@ mod tests { }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); + let cfg = ConfigCache::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); set_balance(&BOB, 1); place_contract(&BOB, recurse_ch); @@ -1120,13 +1120,13 @@ mod tests { let vm = MockVm::new(); - let witnessed_caller_bob = RefCell::new(None::); - let witnessed_caller_charlie = RefCell::new(None::); + let witnessed_caller_bob = RefCell::new(None::>); + let witnessed_caller_charlie = RefCell::new(None::>); let mut loader = MockLoader::empty(); let bob_ch = loader.insert(|ctx| { // Record the caller for bob. - *witnessed_caller_bob.borrow_mut() = Some(*ctx.ext.caller()); + *witnessed_caller_bob.borrow_mut() = Some(ctx.ext.caller().clone()); // Call into CHARLIE contract. assert_matches!( @@ -1137,19 +1137,19 @@ mod tests { }); let charlie_ch = loader.insert(|ctx| { // Record the caller for charlie. 
- *witnessed_caller_charlie.borrow_mut() = Some(*ctx.ext.caller()); + *witnessed_caller_charlie.borrow_mut() = Some(ctx.ext.caller().clone()); exec_success() }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); + let cfg = ConfigCache::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + let mut ctx = ExecutionContext::top_level(origin.clone(), &cfg, &vm, &loader); place_contract(&dest, bob_ch); place_contract(&CHARLIE, charlie_ch); let result = ctx.call( - dest, + dest.clone(), 0, &mut GasMeter::::new(GAS_LIMIT), vec![], @@ -1184,7 +1184,7 @@ mod tests { }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); + let cfg = ConfigCache::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); place_contract(&BOB, bob_ch); place_contract(&CHARLIE, charlie_ch); @@ -1208,7 +1208,7 @@ mod tests { let dummy_ch = loader.insert(|_| exec_success()); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); + let cfg = ConfigCache::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); assert_matches!( @@ -1217,6 +1217,7 @@ mod tests { &mut GasMeter::::new(GAS_LIMIT), &dummy_ch, vec![], + &[], ), Err(_) ); @@ -1233,7 +1234,7 @@ mod tests { ); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); + let cfg = ConfigCache::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); set_balance(&ALICE, 1000); @@ -1243,13 +1244,14 @@ mod tests { &mut GasMeter::::new(GAS_LIMIT), &dummy_ch, vec![], + &[], ), Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address ); // Check that the newly created account has the expected code hash and // there are instantiation event. - assert_eq!(storage::code_hash::(&instantiated_contract_address).unwrap(), dummy_ch); + assert_eq!(Storage::::code_hash(&instantiated_contract_address).unwrap(), dummy_ch); assert_eq!(&events(), &[ RawEvent::Instantiated(ALICE, instantiated_contract_address) ]); @@ -1266,7 +1268,7 @@ mod tests { ); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); + let cfg = ConfigCache::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); set_balance(&ALICE, 1000); @@ -1276,12 +1278,13 @@ mod tests { &mut GasMeter::::new(GAS_LIMIT), &dummy_ch, vec![], + &[], ), Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address ); // Check that the account has not been created. - assert!(storage::code_hash::(&instantiated_contract_address).is_err()); + assert!(Storage::::code_hash(&instantiated_contract_address).is_err()); assert!(events().is_empty()); }); } @@ -1292,7 +1295,7 @@ mod tests { let mut loader = MockLoader::empty(); let dummy_ch = loader.insert(|_| exec_success()); - let instantiated_contract_address = Rc::new(RefCell::new(None::)); + let instantiated_contract_address = Rc::new(RefCell::new(None::>)); let instantiator_ch = loader.insert({ let dummy_ch = dummy_ch.clone(); let instantiated_contract_address = Rc::clone(&instantiated_contract_address); @@ -1300,9 +1303,10 @@ mod tests { // Instantiate a contract and save it's address in `instantiated_contract_address`. 
let (address, output) = ctx.ext.instantiate( &dummy_ch, - Config::::subsistence_threshold_uncached(), + ConfigCache::::subsistence_threshold_uncached(), ctx.gas_meter, - vec![] + vec![], + &[48, 49, 50], ).unwrap(); *instantiated_contract_address.borrow_mut() = address.into(); @@ -1311,7 +1315,7 @@ mod tests { }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); + let cfg = ConfigCache::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); set_balance(&ALICE, 1000); set_balance(&BOB, 100); @@ -1326,7 +1330,7 @@ mod tests { // Check that the newly created account has the expected code hash and // there are instantiation event. - assert_eq!(storage::code_hash::(&instantiated_contract_address).unwrap(), dummy_ch); + assert_eq!(Storage::::code_hash(&instantiated_contract_address).unwrap(), dummy_ch); assert_eq!(&events(), &[ RawEvent::Instantiated(BOB, instantiated_contract_address) ]); @@ -1350,7 +1354,8 @@ mod tests { &dummy_ch, 15u64, ctx.gas_meter, - vec![] + vec![], + &[], ), Err(ExecError { error: DispatchError::Other("It's a trap!"), @@ -1363,7 +1368,7 @@ mod tests { }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); + let cfg = ConfigCache::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); set_balance(&ALICE, 1000); set_balance(&BOB, 100); @@ -1395,7 +1400,7 @@ mod tests { .existential_deposit(15) .build() .execute_with(|| { - let cfg = Config::preload(); + let cfg = ConfigCache::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); set_balance(&ALICE, 1000); @@ -1405,6 +1410,7 @@ mod tests { &mut GasMeter::::new(GAS_LIMIT), &terminate_ch, vec![], + &[], ), Err(Error::::NewContractNotFunded.into()) ); @@ -1428,7 +1434,7 @@ mod tests { }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); + let cfg = ConfigCache::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); set_balance(&ALICE, 100); @@ -1437,6 +1443,7 @@ mod tests { &mut GasMeter::::new(GAS_LIMIT), &rent_allowance_ch, vec![], + &[], ); assert_matches!(result, Ok(_)); }); diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index 0828a220c040695083f7cd932b6b1ece19cad134..18a200fd312cd58748d8be01a02165d2df877ecc 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use crate::Trait; +use crate::Config; use sp_std::marker::PhantomData; use sp_runtime::traits::Zero; use frame_support::dispatch::{ @@ -60,7 +60,7 @@ impl TestAuxiliaries for T {} /// Implementing type is expected to be super lightweight hence `Copy` (`Clone` is added /// for consistency). If inlined there should be no observable difference compared /// to a hand-written code. -pub trait Token: Copy + Clone + TestAuxiliaries { +pub trait Token: Copy + Clone + TestAuxiliaries { /// Metadata type, which the token can require for calculating the amount /// of gas to charge. Can be a some configuration type or /// just the `()`. @@ -84,7 +84,7 @@ pub struct ErasedToken { pub token: Box, } -pub struct GasMeter { +pub struct GasMeter { gas_limit: Gas, /// Amount of gas left from initial gas limit. Can reach zero. 
gas_left: Gas, @@ -92,7 +92,7 @@ pub struct GasMeter { #[cfg(test)] tokens: Vec, } -impl GasMeter { +impl GasMeter { pub fn new(gas_limit: Gas) -> Self { GasMeter { gas_limit, diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index f43bfd0ebdb6f859c48edc6d526d3db85af87d1e..f0200fbd15fd490b48c6fae6a8a0fbd26be3e02d 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -18,7 +18,7 @@ //! //! The Contract module provides functionality for the runtime to deploy and execute WebAssembly smart-contracts. //! -//! - [`contract::Trait`](./trait.Trait.html) +//! - [`contract::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -93,14 +93,18 @@ pub mod weights; #[cfg(test)] mod tests; -use crate::exec::ExecutionContext; -use crate::wasm::{WasmLoader, WasmVm}; -use crate::weights::WeightInfo; - -pub use crate::gas::{Gas, GasMeter}; -pub use crate::wasm::ReturnCode as RuntimeReturnCode; -pub use crate::schedule::{Schedule, HostFnWeights, InstructionWeights, Limits}; - +pub use crate::{ + gas::{Gas, GasMeter}, + wasm::ReturnCode as RuntimeReturnCode, + weights::WeightInfo, + schedule::{Schedule, HostFnWeights, InstructionWeights, Limits}, +}; +use crate::{ + exec::ExecutionContext, + wasm::{WasmLoader, WasmVm}, + rent::Rent, + storage::Storage, +}; use sp_core::crypto::UncheckedFrom; use sp_std::{prelude::*, marker::PhantomData, fmt::Debug}; use codec::{Codec, Encode, Decode}; @@ -112,7 +116,7 @@ use sp_runtime::{ }; use frame_support::{ decl_module, decl_event, decl_storage, decl_error, ensure, - parameter_types, storage::child::ChildInfo, + storage::child::ChildInfo, dispatch::{DispatchResult, DispatchResultWithPostInfo}, traits::{OnUnbalanced, Currency, Get, Time, Randomness}, }; @@ -122,23 +126,18 @@ use pallet_contracts_primitives::{ }; use frame_support::weights::Weight; -pub type CodeHash = ::Hash; +pub type CodeHash = ::Hash; pub type TrieId = Vec; -/// A function that generates an `AccountId` for a contract upon instantiation. -pub trait ContractAddressFor { - fn contract_address_for(code_hash: &CodeHash, data: &[u8], origin: &AccountId) -> AccountId; -} - /// Information for managing an account and its sub trie abstraction. /// This is the required info to cache for an account #[derive(Encode, Decode, RuntimeDebug)] -pub enum ContractInfo { +pub enum ContractInfo { Alive(AliveContractInfo), Tombstone(TombstoneContractInfo), } -impl ContractInfo { +impl ContractInfo { /// If contract is alive then return some alive info pub fn get_alive(self) -> Option> { if let ContractInfo::Alive(alive) = self { @@ -191,7 +190,7 @@ impl ContractInfo { } pub type AliveContractInfo = - RawAliveContractInfo, BalanceOf, ::BlockNumber>; + RawAliveContractInfo, BalanceOf, ::BlockNumber>; /// Information for managing an account and its sub trie abstraction. /// This is the required info to cache for an account. @@ -231,7 +230,7 @@ pub(crate) fn child_trie_info(trie_id: &[u8]) -> ChildInfo { } pub type TombstoneContractInfo = - RawTombstoneContractInfo<::Hash, ::Hashing>; + RawTombstoneContractInfo<::Hash, ::Hashing>; #[derive(Encode, Decode, PartialEq, Eq, RuntimeDebug)] pub struct RawTombstoneContractInfo(H, PhantomData); @@ -251,73 +250,18 @@ where } } -impl From> for ContractInfo { +impl From> for ContractInfo { fn from(alive_info: AliveContractInfo) -> Self { Self::Alive(alive_info) } } -/// Get a trie id (trie id must be unique and collision resistant depending upon its context). 
-/// Note that it is different than encode because trie id should be collision resistant -/// (being a proper unique identifier). -pub trait TrieIdGenerator { - /// Get a trie id for an account, using reference to parent account trie id to ensure - /// uniqueness of trie id. - /// - /// The implementation must ensure every new trie id is unique: two consecutive calls with the - /// same parameter needs to return different trie id values. - fn trie_id(account_id: &AccountId) -> TrieId; -} - -/// Get trie id from `account_id`. -pub struct TrieIdFromParentCounter(PhantomData); - -/// This generator uses inner counter for account id and applies the hash over `AccountId + -/// accountid_counter`. -impl TrieIdGenerator for TrieIdFromParentCounter -where - T::AccountId: AsRef<[u8]> -{ - fn trie_id(account_id: &T::AccountId) -> TrieId { - // Note that skipping a value due to error is not an issue here. - // We only need uniqueness, not sequence. - let new_seed = AccountCounter::mutate(|v| { - *v = v.wrapping_add(1); - *v - }); - - let mut buf = Vec::new(); - buf.extend_from_slice(account_id.as_ref()); - buf.extend_from_slice(&new_seed.to_le_bytes()[..]); - T::Hashing::hash(&buf[..]).as_ref().into() - } -} - pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Currency<::AccountId>>::Balance; pub type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; - -parameter_types! { - /// A reasonable default value for [`Trait::SignedClaimedHandicap`]. - pub const DefaultSignedClaimHandicap: u32 = 2; - /// A reasonable default value for [`Trait::TombstoneDeposit`]. - pub const DefaultTombstoneDeposit: u32 = 16; - /// A reasonable default value for [`Trait::StorageSizeOffset`]. - pub const DefaultStorageSizeOffset: u32 = 8; - /// A reasonable default value for [`Trait::RentByteFee`]. - pub const DefaultRentByteFee: u32 = 4; - /// A reasonable default value for [`Trait::RentDepositOffset`]. - pub const DefaultRentDepositOffset: u32 = 1000; - /// A reasonable default value for [`Trait::SurchargeReward`]. - pub const DefaultSurchargeReward: u32 = 150; - /// A reasonable default value for [`Trait::MaxDepth`]. - pub const DefaultMaxDepth: u32 = 32; - /// A reasonable default value for [`Trait::MaxValueSize`]. - pub const DefaultMaxValueSize: u32 = 16_384; -} + <::Currency as Currency<::AccountId>>::NegativeImbalance; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { type Time: Time; type Randomness: Randomness; @@ -325,13 +269,7 @@ pub trait Trait: frame_system::Trait { type Currency: Currency; /// The overarching event type. - type Event: From> + Into<::Event>; - - /// A function type to get the contract address given the instantiator. - type DetermineContractAddress: ContractAddressFor, Self::AccountId>; - - /// trie id generator - type TrieIdGenerator: TrieIdGenerator; + type Event: From> + Into<::Event>; /// Handler for rent payments. type RentPayment: OnUnbalanced>; @@ -383,32 +321,13 @@ pub trait Trait: frame_system::Trait { type WeightInfo: WeightInfo; } -/// Simple contract address determiner. -/// -/// Address calculated from the code (of the constructor), input data to the constructor, -/// and the account id that requested the account creation. 
-/// -/// Formula: `blake2_256(blake2_256(code) + blake2_256(data) + origin)` -pub struct SimpleAddressDeterminer(PhantomData); -impl ContractAddressFor, T::AccountId> for SimpleAddressDeterminer -where - T::AccountId: UncheckedFrom + AsRef<[u8]> -{ - fn contract_address_for(code_hash: &CodeHash, data: &[u8], origin: &T::AccountId) -> T::AccountId { - let data_hash = T::Hashing::hash(data); - - let mut buf = Vec::new(); - buf.extend_from_slice(code_hash.as_ref()); - buf.extend_from_slice(data_hash.as_ref()); - buf.extend_from_slice(origin.as_ref()); - - UncheckedFrom::unchecked_from(T::Hashing::hash(&buf[..])) - } -} - decl_error! { /// Error for the contracts module. - pub enum Error for Module { + pub enum Error for Module + where + T::AccountId: UncheckedFrom, + T::AccountId: AsRef<[u8]>, + { /// A new schedule must have a greater version than the current one. InvalidScheduleVersion, /// An origin must be signed or inherent and auxiliary sender only provided on inherent. @@ -455,12 +374,21 @@ decl_error! { ContractTrapped, /// The size defined in `T::MaxValueSize` was exceeded. ValueTooLarge, + /// The action performed is not allowed while the contract performing it is already + /// on the call stack. Those actions are contract self destruction and restoration + /// of a tombstone. + ReentranceDenied, } } decl_module! { /// Contracts module. - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call + where + origin: T::Origin, + T::AccountId: UncheckedFrom, + T::AccountId: AsRef<[u8]>, + { type Error = Error; /// Number of block delay an extrinsic claim surcharge has. @@ -563,29 +491,38 @@ decl_module! { gas_meter.into_dispatch_result(result) } - /// Instantiates a new contract from the `codehash` generated by `put_code`, optionally transferring some balance. + /// Instantiates a new contract from the `code_hash` generated by `put_code`, + /// optionally transferring some balance. + /// + /// The supplied `salt` is used for contract address deriviation. See `fn contract_address`. /// /// Instantiation is executed as follows: /// - /// - The destination address is computed based on the sender and hash of the code. + /// - The destination address is computed based on the sender, code_hash and the salt. /// - The smart-contract account is created at the computed address. /// - The `ctor_code` is executed in the context of the newly-created account. Buffer returned /// after the execution is saved as the `code` of the account. That code will be invoked /// upon any call received by this account. /// - The contract is initialized. - #[weight = T::WeightInfo::instantiate(data.len() as u32 / 1024).saturating_add(*gas_limit)] + #[weight = + T::WeightInfo::instantiate( + data.len() as u32 / 1024, + salt.len() as u32 / 1024, + ).saturating_add(*gas_limit) + ] pub fn instantiate( origin, #[compact] endowment: BalanceOf, #[compact] gas_limit: Gas, code_hash: CodeHash, - data: Vec + data: Vec, + salt: Vec, ) -> DispatchResultWithPostInfo { let origin = ensure_signed(origin)?; let mut gas_meter = GasMeter::new(gas_limit); let result = Self::execute_wasm(origin, &mut gas_meter, |ctx, gas_meter| { - ctx.instantiate(endowment, gas_meter, &code_hash, data) + ctx.instantiate(endowment, gas_meter, &code_hash, data, &salt) .map(|(_address, output)| output) }); gas_meter.into_dispatch_result(result) @@ -619,7 +556,7 @@ decl_module! { }; // If poking the contract has lead to eviction of the contract, give out the rewards. 
- if rent::snitch_contract_should_be_evicted::(&dest, handicap) { + if Rent::::snitch_contract_should_be_evicted(&dest, handicap) { T::Currency::deposit_into_existing(&rewarded, T::SurchargeReward::get())?; } } @@ -627,7 +564,10 @@ decl_module! { } /// Public APIs provided by the contracts module. -impl Module { +impl Module +where + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ /// Perform a call to a specified contract. /// /// This function is similar to `Self::call`, but doesn't perform any address lookups and better /// suitable for calling directly from Rust. @@ -659,12 +599,12 @@ impl Module { .get_alive() .ok_or(ContractAccessError::IsTombstone)?; - let maybe_value = storage::read_contract_storage(&contract_info.trie_id, &key); + let maybe_value = Storage::::read(&contract_info.trie_id, &key); Ok(maybe_value) } pub fn rent_projection(address: T::AccountId) -> RentProjectionResult { - rent::compute_rent_projection::(&address) + Rent::::compute_projection(&address) } /// Put code for benchmarks which does not check or instrument the code. @@ -674,15 +614,40 @@ impl Module { let result = wasm::save_code_raw::(code, &schedule); result.map(|_| ()).map_err(Into::into) } + + /// Determine the address of a contract. + /// + /// This is the address generation function used by contract instantiation. Its result + /// is only dependent on its inputs. It can therefore be used to reliably predict the + /// address of a contract. This is akin to the formula of eth's CREATE2 opcode. There + /// is no CREATE equivalent because CREATE2 is strictly more powerful. + /// + /// Formula: `hash(deploying_address ++ code_hash ++ salt)` + pub fn contract_address( + deploying_address: &T::AccountId, + code_hash: &CodeHash, + salt: &[u8], + ) -> T::AccountId + { + let buf: Vec<_> = deploying_address.as_ref().iter() + .chain(code_hash.as_ref()) + .chain(salt) + .cloned() + .collect(); + UncheckedFrom::unchecked_from(T::Hashing::hash(&buf)) + } } -impl Module { +impl Module +where + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ fn execute_wasm( origin: T::AccountId, gas_meter: &mut GasMeter, func: impl FnOnce(&mut ExecutionContext, WasmLoader>, &mut GasMeter) -> ExecResult, ) -> ExecResult { - let cfg = Config::preload(); + let cfg = ConfigCache::preload(); let vm = WasmVm::new(&cfg.schedule); let loader = WasmLoader::new(&cfg.schedule); let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); @@ -694,8 +659,8 @@ decl_event! { pub enum Event where Balance = BalanceOf, - ::AccountId, - ::Hash + ::AccountId, + ::Hash { /// Contract deployed by address at the specified address. \[owner, contract\] Instantiated(AccountId, AccountId), @@ -734,7 +699,10 @@ decl_event! { } decl_storage! { - trait Store for Module as Contracts { + trait Store for Module as Contracts + where + T::AccountId: UncheckedFrom + AsRef<[u8]> + { /// Current cost schedule for contracts. CurrentSchedule get(fn current_schedule) config(): Schedule = Default::default(); /// A mapping from an original code hash to the original code, untouched by instrumentation. @@ -754,7 +722,7 @@ decl_storage! { /// /// We assume that these values can't be changed in the /// course of transaction execution.
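// Illustrative sketch of the `contract_address` formula above, reproduced off-chain.
// Assumptions: the runtime's `T::Hashing` is BLAKE2-256 and `AccountId` is the raw
// 32-byte hash output (as in the default Substrate node); the helper name is hypothetical.
fn predict_contract_address(deployer: &[u8; 32], code_hash: &[u8; 32], salt: &[u8]) -> [u8; 32] {
    let mut buf = Vec::with_capacity(deployer.len() + code_hash.len() + salt.len());
    buf.extend_from_slice(deployer);   // deploying_address
    buf.extend_from_slice(code_hash);  // code hash returned by `put_code`
    buf.extend_from_slice(salt);       // salt passed to `instantiate`
    // hash(deploying_address ++ code_hash ++ salt)
    sp_io::hashing::blake2_256(&buf)
}
// Two instantiations by the same deployer with the same code but different salts
// therefore land on two different, individually predictable addresses.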
-pub struct Config { +pub struct ConfigCache { pub schedule: Schedule, pub existential_deposit: BalanceOf, pub tombstone_deposit: BalanceOf, @@ -762,9 +730,12 @@ pub struct Config { pub max_value_size: u32, } -impl Config { - fn preload() -> Config { - Config { +impl ConfigCache +where + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ + fn preload() -> ConfigCache { + ConfigCache { schedule: >::current_schedule(), existential_deposit: T::Currency::minimum_balance(), tombstone_deposit: T::TombstoneDeposit::get(), diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 3dc473363190bc2eab6ed5565087bd24cddb7b12..8b6f81c916bef4f822cb42d50e91566adef49704 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -18,25 +18,29 @@ use crate::{ AliveContractInfo, BalanceOf, ContractInfo, ContractInfoOf, Module, RawEvent, - TombstoneContractInfo, Trait, CodeHash, Config + TombstoneContractInfo, Config, CodeHash, ConfigCache, Error, }; use sp_std::prelude::*; use sp_io::hashing::blake2_256; +use sp_core::crypto::UncheckedFrom; use frame_support::storage::child; use frame_support::traits::{Currency, ExistenceRequirement, Get, OnUnbalanced, WithdrawReasons}; use frame_support::StorageMap; use pallet_contracts_primitives::{ContractAccessError, RentProjection, RentProjectionResult}; -use sp_runtime::traits::{Bounded, CheckedDiv, CheckedMul, SaturatedConversion, Saturating, Zero}; +use sp_runtime::{ + DispatchError, + traits::{Bounded, CheckedDiv, CheckedMul, SaturatedConversion, Saturating, Zero}, +}; /// The amount to charge. /// /// This amount respects the contract's rent allowance and the subsistence deposit. /// Because of that, charging the amount cannot remove the contract. -struct OutstandingAmount { +struct OutstandingAmount { amount: BalanceOf, } -impl OutstandingAmount { +impl OutstandingAmount { /// Create the new outstanding amount. /// /// The amount should be always withdrawable and it should not kill the account. @@ -63,7 +67,7 @@ impl OutstandingAmount { } } -enum Verdict { +enum Verdict { /// The contract is exempted from paying rent. /// /// For example, it already paid its rent in the current block, or it has enough deposit for not @@ -82,405 +86,414 @@ enum Verdict { Charge { amount: OutstandingAmount }, } -/// Returns a fee charged per block from the contract. -/// -/// This function accounts for the storage rent deposit. I.e. if the contract possesses enough funds -/// then the fee can drop to zero. -fn compute_fee_per_block( - free_balance: &BalanceOf, - contract: &AliveContractInfo, -) -> BalanceOf { - let free_storage = free_balance - .checked_div(&T::RentDepositOffset::get()) - .unwrap_or_else(Zero::zero); - - // For now, we treat every empty KV pair as if it was one byte long. - let empty_pairs_equivalent = contract.empty_pair_count; - - let effective_storage_size = >::from( - contract.storage_size + T::StorageSizeOffset::get() + empty_pairs_equivalent, - ) - .saturating_sub(free_storage); - - effective_storage_size - .checked_mul(&T::RentByteFee::get()) - .unwrap_or_else(|| >::max_value()) -} +pub struct Rent(sp_std::marker::PhantomData); -/// Returns amount of funds available to consume by rent mechanism. -/// -/// Rent mechanism cannot consume more than `rent_allowance` set by the contract and it cannot make -/// the balance lower than [`subsistence_threshold`]. -/// -/// In case the toal_balance is below the subsistence threshold, this function returns `None`. 
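// Worked example of the fee-per-block formula above; the parameter values are made up
// for illustration, the real ones come from the runtime's `Config` implementation.
fn fee_per_block_example() {
    let (rent_deposit_offset, storage_size_offset, rent_byte_fee) = (1_000u64, 8u64, 4u64);
    let (free_balance, storage_size, empty_pair_count) = (10_000u64, 120u64, 2u64);

    // Funds on deposit "buy" some storage for free.
    let free_storage = free_balance / rent_deposit_offset;                  // 10
    // Every empty pair is treated as one byte; the fixed offset is always added.
    let effective_storage_size = (storage_size + storage_size_offset + empty_pair_count)
        .saturating_sub(free_storage);                                      // 120
    let fee_per_block = effective_storage_size * rent_byte_fee;             // 480 per block
    assert_eq!(fee_per_block, 480);
}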
-fn rent_budget( - total_balance: &BalanceOf, - free_balance: &BalanceOf, - contract: &AliveContractInfo, -) -> Option> { - let subsistence_threshold = Config::::subsistence_threshold_uncached(); - // Reserved balance contributes towards the subsistence threshold to stay consistent - // with the existential deposit where the reserved balance is also counted. - if *total_balance < subsistence_threshold { - return None; +impl Rent +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ + /// Returns a fee charged per block from the contract. + /// + /// This function accounts for the storage rent deposit. I.e. if the contract possesses enough funds + /// then the fee can drop to zero. + fn compute_fee_per_block( + free_balance: &BalanceOf, + contract: &AliveContractInfo + ) -> BalanceOf { + let free_storage = free_balance + .checked_div(&T::RentDepositOffset::get()) + .unwrap_or_else(Zero::zero); + + // For now, we treat every empty KV pair as if it was one byte long. + let empty_pairs_equivalent = contract.empty_pair_count; + + let effective_storage_size = >::from( + contract.storage_size + T::StorageSizeOffset::get() + empty_pairs_equivalent, + ) + .saturating_sub(free_storage); + + effective_storage_size + .checked_mul(&T::RentByteFee::get()) + .unwrap_or_else(|| >::max_value()) } - // However, reserved balance cannot be charged so we need to use the free balance - // to calculate the actual budget (which can be 0). - let rent_allowed_to_charge = free_balance.saturating_sub(subsistence_threshold); - Some(>::min( - contract.rent_allowance, - rent_allowed_to_charge, - )) -} + /// Returns amount of funds available to consume by rent mechanism. + /// + /// Rent mechanism cannot consume more than `rent_allowance` set by the contract and it cannot make + /// the balance lower than [`subsistence_threshold`]. + /// + /// In case the toal_balance is below the subsistence threshold, this function returns `None`. + fn rent_budget( + total_balance: &BalanceOf, + free_balance: &BalanceOf, + contract: &AliveContractInfo, + ) -> Option> { + let subsistence_threshold = ConfigCache::::subsistence_threshold_uncached(); + // Reserved balance contributes towards the subsistence threshold to stay consistent + // with the existential deposit where the reserved balance is also counted. + if *total_balance < subsistence_threshold { + return None; + } -/// Consider the case for rent payment of the given account and returns a `Verdict`. -/// -/// Use `handicap` in case you want to change the reference block number. (To get more details see -/// `snitch_contract_should_be_evicted` ). -fn consider_case( - account: &T::AccountId, - current_block_number: T::BlockNumber, - handicap: T::BlockNumber, - contract: &AliveContractInfo, -) -> Verdict { - // How much block has passed since the last deduction for the contract. - let blocks_passed = { - // Calculate an effective block number, i.e. after adjusting for handicap. - let effective_block_number = current_block_number.saturating_sub(handicap); - effective_block_number.saturating_sub(contract.deduct_block) - }; - if blocks_passed.is_zero() { - // Rent has already been paid - return Verdict::Exempt; + // However, reserved balance cannot be charged so we need to use the free balance + // to calculate the actual budget (which can be 0). 
+ let rent_allowed_to_charge = free_balance.saturating_sub(subsistence_threshold); + Some(>::min( + contract.rent_allowance, + rent_allowed_to_charge, + )) } - let total_balance = T::Currency::total_balance(account); - let free_balance = T::Currency::free_balance(account); + /// Consider the case for rent payment of the given account and returns a `Verdict`. + /// + /// Use `handicap` in case you want to change the reference block number. (To get more details see + /// `snitch_contract_should_be_evicted` ). + fn consider_case( + account: &T::AccountId, + current_block_number: T::BlockNumber, + handicap: T::BlockNumber, + contract: &AliveContractInfo, + ) -> Verdict { + // How much block has passed since the last deduction for the contract. + let blocks_passed = { + // Calculate an effective block number, i.e. after adjusting for handicap. + let effective_block_number = current_block_number.saturating_sub(handicap); + effective_block_number.saturating_sub(contract.deduct_block) + }; + if blocks_passed.is_zero() { + // Rent has already been paid + return Verdict::Exempt; + } - // An amount of funds to charge per block for storage taken up by the contract. - let fee_per_block = compute_fee_per_block::(&free_balance, contract); - if fee_per_block.is_zero() { - // The rent deposit offset reduced the fee to 0. This means that the contract - // gets the rent for free. - return Verdict::Exempt; - } + let total_balance = T::Currency::total_balance(account); + let free_balance = T::Currency::free_balance(account); - let rent_budget = match rent_budget::(&total_balance, &free_balance, contract) { - Some(rent_budget) => rent_budget, - None => { - // The contract's total balance is already below subsistence threshold. That - // indicates that the contract cannot afford to leave a tombstone. - // - // So cleanly wipe the contract. - return Verdict::Kill; + // An amount of funds to charge per block for storage taken up by the contract. + let fee_per_block = Self::compute_fee_per_block(&free_balance, contract); + if fee_per_block.is_zero() { + // The rent deposit offset reduced the fee to 0. This means that the contract + // gets the rent for free. + return Verdict::Exempt; } - }; - - let dues = fee_per_block - .checked_mul(&blocks_passed.saturated_into::().into()) - .unwrap_or_else(|| >::max_value()); - let insufficient_rent = rent_budget < dues; - - // If the rent payment cannot be withdrawn due to locks on the account balance, then evict the - // account. - // - // NOTE: This seems problematic because it provides a way to tombstone an account while - // avoiding the last rent payment. In effect, someone could retroactively set rent_allowance - // for their contract to 0. - let dues_limited = dues.min(rent_budget); - let can_withdraw_rent = T::Currency::ensure_can_withdraw( - account, - dues_limited, - WithdrawReasons::FEE, - free_balance.saturating_sub(dues_limited), - ) - .is_ok(); - - if insufficient_rent || !can_withdraw_rent { - // The contract cannot afford the rent payment and has a balance above the subsistence - // threshold, so it leaves a tombstone. - let amount = if can_withdraw_rent { - Some(OutstandingAmount::new(dues_limited)) - } else { - None + + let rent_budget = match Self::rent_budget(&total_balance, &free_balance, contract) { + Some(rent_budget) => rent_budget, + None => { + // The contract's total balance is already below subsistence threshold. That + // indicates that the contract cannot afford to leave a tombstone. + // + // So cleanly wipe the contract. 
+ return Verdict::Kill; + } + }; + + let dues = fee_per_block + .checked_mul(&blocks_passed.saturated_into::().into()) + .unwrap_or_else(|| >::max_value()); + let insufficient_rent = rent_budget < dues; + + // If the rent payment cannot be withdrawn due to locks on the account balance, then evict the + // account. + // + // NOTE: This seems problematic because it provides a way to tombstone an account while + // avoiding the last rent payment. In effect, someone could retroactively set rent_allowance + // for their contract to 0. + let dues_limited = dues.min(rent_budget); + let can_withdraw_rent = T::Currency::ensure_can_withdraw( + account, + dues_limited, + WithdrawReasons::FEE, + free_balance.saturating_sub(dues_limited), + ) + .is_ok(); + + if insufficient_rent || !can_withdraw_rent { + // The contract cannot afford the rent payment and has a balance above the subsistence + // threshold, so it leaves a tombstone. + let amount = if can_withdraw_rent { + Some(OutstandingAmount::new(dues_limited)) + } else { + None + }; + return Verdict::Evict { amount }; + } + + return Verdict::Charge { + // We choose to use `dues_limited` here instead of `dues` just to err on the safer side. + amount: OutstandingAmount::new(dues_limited), }; - return Verdict::Evict { amount }; } - return Verdict::Charge { - // We choose to use `dues_limited` here instead of `dues` just to err on the safer side. - amount: OutstandingAmount::new(dues_limited), - }; -} + /// Enacts the given verdict and returns the updated `ContractInfo`. + /// + /// `alive_contract_info` should be from the same address as `account`. + fn enact_verdict( + account: &T::AccountId, + alive_contract_info: AliveContractInfo, + current_block_number: T::BlockNumber, + verdict: Verdict, + ) -> Option> { + match verdict { + Verdict::Exempt => return Some(ContractInfo::Alive(alive_contract_info)), + Verdict::Kill => { + >::remove(account); + child::kill_storage( + &alive_contract_info.child_trie_info(), + None, + ); + >::deposit_event(RawEvent::Evicted(account.clone(), false)); + None + } + Verdict::Evict { amount } => { + if let Some(amount) = amount { + amount.withdraw(account); + } + + // Note: this operation is heavy. + let child_storage_root = child::root( + &alive_contract_info.child_trie_info(), + ); + + let tombstone = >::new( + &child_storage_root[..], + alive_contract_info.code_hash, + ); + let tombstone_info = ContractInfo::Tombstone(tombstone); + >::insert(account, &tombstone_info); + + child::kill_storage( + &alive_contract_info.child_trie_info(), + None, + ); + + >::deposit_event(RawEvent::Evicted(account.clone(), true)); + Some(tombstone_info) + } + Verdict::Charge { amount } => { + let contract_info = ContractInfo::Alive(AliveContractInfo:: { + rent_allowance: alive_contract_info.rent_allowance - amount.peek(), + deduct_block: current_block_number, + ..alive_contract_info + }); + >::insert(account, &contract_info); -/// Enacts the given verdict and returns the updated `ContractInfo`. -/// -/// `alive_contract_info` should be from the same address as `account`. 
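// Continuing the illustration for `rent_budget` and `consider_case` (same made-up
// numbers as in the sketch further up; a real runtime reads them from its `Config`):
fn rent_verdict_example() {
    let (free_balance, total_balance, subsistence_threshold) = (10_000u64, 10_000u64, 1_500u64);
    let (rent_allowance, fee_per_block, blocks_passed) = (5_000u64, 480u64, 5u64);

    // Below the subsistence threshold the verdict would be `Kill`.
    assert!(total_balance >= subsistence_threshold);

    // The budget is capped both by the allowance and by what can be paid without
    // dropping the free balance below the subsistence threshold.
    let rent_budget = rent_allowance.min(free_balance - subsistence_threshold);  // 5_000
    let dues = fee_per_block * blocks_passed;                                    // 2_400

    // The dues fit into the budget (and are withdrawable), so the verdict is `Charge`;
    // otherwise it would be `Evict`, leaving a tombstone behind.
    assert!(dues <= rent_budget);
}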
-fn enact_verdict( - account: &T::AccountId, - alive_contract_info: AliveContractInfo, - current_block_number: T::BlockNumber, - verdict: Verdict, -) -> Option> { - match verdict { - Verdict::Exempt => return Some(ContractInfo::Alive(alive_contract_info)), - Verdict::Kill => { - >::remove(account); - child::kill_storage( - &alive_contract_info.child_trie_info(), - ); - >::deposit_event(RawEvent::Evicted(account.clone(), false)); - None - } - Verdict::Evict { amount } => { - if let Some(amount) = amount { amount.withdraw(account); + Some(contract_info) } + } + } - // Note: this operation is heavy. - let child_storage_root = child::root( - &alive_contract_info.child_trie_info(), - ); - - let tombstone = >::new( - &child_storage_root[..], - alive_contract_info.code_hash, - ); - let tombstone_info = ContractInfo::Tombstone(tombstone); - >::insert(account, &tombstone_info); + /// Make account paying the rent for the current block number + /// + /// NOTE this function performs eviction eagerly. All changes are read and written directly to + /// storage. + pub fn collect(account: &T::AccountId) -> Option> { + let contract_info = >::get(account); + let alive_contract_info = match contract_info { + None | Some(ContractInfo::Tombstone(_)) => return contract_info, + Some(ContractInfo::Alive(contract)) => contract, + }; - child::kill_storage( - &alive_contract_info.child_trie_info(), - ); + let current_block_number = >::block_number(); + let verdict = Self::consider_case( + account, + current_block_number, + Zero::zero(), + &alive_contract_info, + ); + Self::enact_verdict(account, alive_contract_info, current_block_number, verdict) + } - >::deposit_event(RawEvent::Evicted(account.clone(), true)); - Some(tombstone_info) - } - Verdict::Charge { amount } => { - let contract_info = ContractInfo::Alive(AliveContractInfo:: { - rent_allowance: alive_contract_info.rent_allowance - amount.peek(), - deduct_block: current_block_number, - ..alive_contract_info - }); - >::insert(account, &contract_info); - - amount.withdraw(account); - Some(contract_info) + /// Process a report that a contract under the given address should be evicted. + /// + /// Enact the eviction right away if the contract should be evicted and return true. + /// Otherwise, **do nothing** and return false. + /// + /// The `handicap` parameter gives a way to check the rent to a moment in the past instead + /// of current block. E.g. if the contract is going to be evicted at the current block, + /// `handicap = 1` can defer the eviction for 1 block. This is useful to handicap certain snitchers + /// relative to others. + /// + /// NOTE this function performs eviction eagerly. All changes are read and written directly to + /// storage. + pub fn snitch_contract_should_be_evicted( + account: &T::AccountId, + handicap: T::BlockNumber, + ) -> bool { + let contract_info = >::get(account); + let alive_contract_info = match contract_info { + None | Some(ContractInfo::Tombstone(_)) => return false, + Some(ContractInfo::Alive(contract)) => contract, + }; + let current_block_number = >::block_number(); + let verdict = Self::consider_case( + account, + current_block_number, + handicap, + &alive_contract_info, + ); + + // Enact the verdict only if the contract gets removed. + match verdict { + Verdict::Kill | Verdict::Evict { .. 
} => { + Self::enact_verdict(account, alive_contract_info, current_block_number, verdict); + true + } + _ => false, } } -} -/// Make account paying the rent for the current block number -/// -/// NOTE this function performs eviction eagerly. All changes are read and written directly to -/// storage. -pub fn collect_rent(account: &T::AccountId) -> Option> { - let contract_info = >::get(account); - let alive_contract_info = match contract_info { - None | Some(ContractInfo::Tombstone(_)) => return contract_info, - Some(ContractInfo::Alive(contract)) => contract, - }; - - let current_block_number = >::block_number(); - let verdict = consider_case::( - account, - current_block_number, - Zero::zero(), - &alive_contract_info, - ); - enact_verdict(account, alive_contract_info, current_block_number, verdict) -} + /// Returns the projected time a given contract will be able to sustain paying its rent. The + /// returned projection is relevant for the current block, i.e. it is as if the contract was + /// accessed at the beginning of the current block. Returns `None` in case if the contract was + /// evicted before or as a result of the rent collection. + /// + /// The returned value is only an estimation. It doesn't take into account any top ups, changing the + /// rent allowance, or any problems coming from withdrawing the dues. + /// + /// NOTE that this is not a side-effect free function! It will actually collect rent and then + /// compute the projection. This function is only used for implementation of an RPC method through + /// `RuntimeApi` meaning that the changes will be discarded anyway. + pub fn compute_projection( + account: &T::AccountId, + ) -> RentProjectionResult { + let contract_info = >::get(account); + let alive_contract_info = match contract_info { + None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAccessError::IsTombstone), + Some(ContractInfo::Alive(contract)) => contract, + }; + let current_block_number = >::block_number(); + let verdict = Self::consider_case( + account, + current_block_number, + Zero::zero(), + &alive_contract_info, + ); + let new_contract_info = + Self::enact_verdict(account, alive_contract_info, current_block_number, verdict); + + // Check what happened after enaction of the verdict. + let alive_contract_info = match new_contract_info { + None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAccessError::IsTombstone), + Some(ContractInfo::Alive(contract)) => contract, + }; -/// Process a report that a contract under the given address should be evicted. -/// -/// Enact the eviction right away if the contract should be evicted and return true. -/// Otherwise, **do nothing** and return false. -/// -/// The `handicap` parameter gives a way to check the rent to a moment in the past instead -/// of current block. E.g. if the contract is going to be evicted at the current block, -/// `handicap = 1` can defer the eviction for 1 block. This is useful to handicap certain snitchers -/// relative to others. -/// -/// NOTE this function performs eviction eagerly. All changes are read and written directly to -/// storage. 
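// Sketch of the arithmetic behind `compute_projection` (illustrative numbers only):
// once rent for the current block has been collected, the remaining budget divided
// by the per-block fee gives how long the contract can still pay.
fn rent_projection_example() {
    let (fee_per_block, rent_budget, current_block) = (480u64, 2_600u64, 1_000u64);
    let blocks_left = rent_budget / fee_per_block;    // 5
    let eviction_at = current_block + blocks_left;    // RentProjection::EvictionAt(1_005)
    assert_eq!(eviction_at, 1_005);
    // A fee of zero maps to `RentProjection::NoEviction` instead.
}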
-pub fn snitch_contract_should_be_evicted( - account: &T::AccountId, - handicap: T::BlockNumber, -) -> bool { - let contract_info = >::get(account); - let alive_contract_info = match contract_info { - None | Some(ContractInfo::Tombstone(_)) => return false, - Some(ContractInfo::Alive(contract)) => contract, - }; - let current_block_number = >::block_number(); - let verdict = consider_case::( - account, - current_block_number, - handicap, - &alive_contract_info, - ); - - // Enact the verdict only if the contract gets removed. - match verdict { - Verdict::Kill | Verdict::Evict { .. } => { - enact_verdict(account, alive_contract_info, current_block_number, verdict); - true + // Compute how much would the fee per block be with the *updated* balance. + let total_balance = T::Currency::total_balance(account); + let free_balance = T::Currency::free_balance(account); + let fee_per_block = Self::compute_fee_per_block(&free_balance, &alive_contract_info); + if fee_per_block.is_zero() { + return Ok(RentProjection::NoEviction); } - _ => false, - } -} -/// Returns the projected time a given contract will be able to sustain paying its rent. The -/// returned projection is relevant for the current block, i.e. it is as if the contract was -/// accessed at the beginning of the current block. Returns `None` in case if the contract was -/// evicted before or as a result of the rent collection. -/// -/// The returned value is only an estimation. It doesn't take into account any top ups, changing the -/// rent allowance, or any problems coming from withdrawing the dues. -/// -/// NOTE that this is not a side-effect free function! It will actually collect rent and then -/// compute the projection. This function is only used for implementation of an RPC method through -/// `RuntimeApi` meaning that the changes will be discarded anyway. -pub fn compute_rent_projection( - account: &T::AccountId, -) -> RentProjectionResult { - let contract_info = >::get(account); - let alive_contract_info = match contract_info { - None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAccessError::IsTombstone), - Some(ContractInfo::Alive(contract)) => contract, - }; - let current_block_number = >::block_number(); - let verdict = consider_case::( - account, - current_block_number, - Zero::zero(), - &alive_contract_info, - ); - let new_contract_info = - enact_verdict(account, alive_contract_info, current_block_number, verdict); - - // Check what happened after enaction of the verdict. - let alive_contract_info = match new_contract_info { - None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAccessError::IsTombstone), - Some(ContractInfo::Alive(contract)) => contract, - }; - - // Compute how much would the fee per block be with the *updated* balance. - let total_balance = T::Currency::total_balance(account); - let free_balance = T::Currency::free_balance(account); - let fee_per_block = compute_fee_per_block::(&free_balance, &alive_contract_info); - if fee_per_block.is_zero() { - return Ok(RentProjection::NoEviction); + // Then compute how much the contract will sustain under these circumstances. 
+ let rent_budget = Self::rent_budget(&total_balance, &free_balance, &alive_contract_info).expect( + "the contract exists and in the alive state; + the updated balance must be greater than subsistence deposit; + this function doesn't return `None`; + qed + ", + ); + let blocks_left = match rent_budget.checked_div(&fee_per_block) { + Some(blocks_left) => blocks_left, + None => { + // `fee_per_block` is not zero here, so `checked_div` can return `None` if + // there is an overflow. This cannot happen with integers though. Return + // `NoEviction` here just in case. + return Ok(RentProjection::NoEviction); + } + }; + + let blocks_left = blocks_left.saturated_into::().into(); + Ok(RentProjection::EvictionAt( + current_block_number + blocks_left, + )) } - // Then compute how much the contract will sustain under these circumstances. - let rent_budget = rent_budget::(&total_balance, &free_balance, &alive_contract_info).expect( - "the contract exists and in the alive state; - the updated balance must be greater than subsistence deposit; - this function doesn't return `None`; - qed - ", - ); - let blocks_left = match rent_budget.checked_div(&fee_per_block) { - Some(blocks_left) => blocks_left, - None => { - // `fee_per_block` is not zero here, so `checked_div` can return `None` if - // there is an overflow. This cannot happen with integers though. Return - // `NoEviction` here just in case. - return Ok(RentProjection::NoEviction); + /// Restores the destination account using the origin as prototype. + /// + /// The restoration will be performed iff: + /// - origin exists and is alive, + /// - the origin's storage is not written in the current block + /// - the restored account has tombstone + /// - the tombstone matches the hash of the origin storage root, and code hash. + /// + /// Upon succesful restoration, `origin` will be destroyed, all its funds are transferred to + /// the restored account. The restored account will inherit the last write block and its last + /// deduct block will be set to the current block. + pub fn restore_to( + origin: T::AccountId, + dest: T::AccountId, + code_hash: CodeHash, + rent_allowance: BalanceOf, + delta: Vec, + ) -> Result<(), DispatchError> { + let mut origin_contract = >::get(&origin) + .and_then(|c| c.get_alive()) + .ok_or(Error::::InvalidSourceContract)?; + + let child_trie_info = origin_contract.child_trie_info(); + + let current_block = >::block_number(); + + if origin_contract.last_write == Some(current_block) { + return Err(Error::::InvalidContractOrigin.into()); } - }; - let blocks_left = blocks_left.saturated_into::().into(); - Ok(RentProjection::EvictionAt( - current_block_number + blocks_left, - )) -} + let dest_tombstone = >::get(&dest) + .and_then(|c| c.get_tombstone()) + .ok_or(Error::::InvalidDestinationContract)?; -/// Restores the destination account using the origin as prototype. -/// -/// The restoration will be performed iff: -/// - origin exists and is alive, -/// - the origin's storage is not written in the current block -/// - the restored account has tombstone -/// - the tombstone matches the hash of the origin storage root, and code hash. -/// -/// Upon succesful restoration, `origin` will be destroyed, all its funds are transferred to -/// the restored account. The restored account will inherit the last write block and its last -/// deduct block will be set to the current block. 
-pub fn restore_to( - origin: T::AccountId, - dest: T::AccountId, - code_hash: CodeHash, - rent_allowance: BalanceOf, - delta: Vec, -) -> Result<(), &'static str> { - let mut origin_contract = >::get(&origin) - .and_then(|c| c.get_alive()) - .ok_or("Cannot restore from inexisting or tombstone contract")?; - - let child_trie_info = origin_contract.child_trie_info(); - - let current_block = >::block_number(); - - if origin_contract.last_write == Some(current_block) { - return Err("Origin TrieId written in the current block"); - } + let last_write = if !delta.is_empty() { + Some(current_block) + } else { + origin_contract.last_write + }; - let dest_tombstone = >::get(&dest) - .and_then(|c| c.get_tombstone()) - .ok_or("Cannot restore to inexisting or alive contract")?; - - let last_write = if !delta.is_empty() { - Some(current_block) - } else { - origin_contract.last_write - }; - - let key_values_taken = delta.iter() - .filter_map(|key| { - child::get_raw(&child_trie_info, &blake2_256(key)).map(|value| { - child::kill(&child_trie_info, &blake2_256(key)); - (key, value) + let key_values_taken = delta.iter() + .filter_map(|key| { + child::get_raw(&child_trie_info, &blake2_256(key)).map(|value| { + child::kill(&child_trie_info, &blake2_256(key)); + (key, value) + }) }) - }) - .collect::>(); - - let tombstone = >::new( - // This operation is cheap enough because last_write (delta not included) - // is not this block as it has been checked earlier. - &child::root(&child_trie_info)[..], - code_hash, - ); - - if tombstone != dest_tombstone { - for (key, value) in key_values_taken { - child::put_raw(&child_trie_info, &blake2_256(key), &value); + .collect::>(); + + let tombstone = >::new( + // This operation is cheap enough because last_write (delta not included) + // is not this block as it has been checked earlier. 
+ &child::root(&child_trie_info)[..], + code_hash, + ); + + if tombstone != dest_tombstone { + for (key, value) in key_values_taken { + child::put_raw(&child_trie_info, &blake2_256(key), &value); + } + return Err(Error::::InvalidTombstone.into()); } - return Err("Tombstones don't match"); + origin_contract.storage_size -= key_values_taken.iter() + .map(|(_, value)| value.len() as u32) + .sum::(); + + >::remove(&origin); + >::insert(&dest, ContractInfo::Alive(AliveContractInfo:: { + trie_id: origin_contract.trie_id, + storage_size: origin_contract.storage_size, + empty_pair_count: origin_contract.empty_pair_count, + total_pair_count: origin_contract.total_pair_count, + code_hash, + rent_allowance, + deduct_block: current_block, + last_write, + })); + + let origin_free_balance = T::Currency::free_balance(&origin); + T::Currency::make_free_balance_be(&origin, >::zero()); + T::Currency::deposit_creating(&dest, origin_free_balance); + + Ok(()) } - - origin_contract.storage_size -= key_values_taken.iter() - .map(|(_, value)| value.len() as u32) - .sum::(); - - >::remove(&origin); - >::insert(&dest, ContractInfo::Alive(AliveContractInfo:: { - trie_id: origin_contract.trie_id, - storage_size: origin_contract.storage_size, - empty_pair_count: origin_contract.empty_pair_count, - total_pair_count: origin_contract.total_pair_count, - code_hash, - rent_allowance, - deduct_block: current_block, - last_write, - })); - - let origin_free_balance = T::Currency::free_balance(&origin); - T::Currency::make_free_balance_be(&origin, >::zero()); - T::Currency::deposit_creating(&dest, origin_free_balance); - - Ok(()) } diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index ff2cde2297118349815783dcc11a1b8df9df7679..df1ea240630cd26c9a1bd338547b8e3fb327213f 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -17,7 +17,7 @@ //! This module contains the cost schedule and supporting code that constructs a //! sane default schedule from a `WeightInfo` implementation. -use crate::{Trait, weights::WeightInfo}; +use crate::{Config, weights::WeightInfo}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; @@ -42,7 +42,7 @@ pub const INSTR_BENCHMARK_BATCH_SIZE: u32 = 1_000; #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[cfg_attr(feature = "std", serde(bound(serialize = "", deserialize = "")))] #[derive(Clone, Encode, Decode, PartialEq, Eq, ScheduleDebug)] -pub struct Schedule { +pub struct Schedule { /// Version of the schedule. pub version: u32, @@ -69,7 +69,7 @@ pub struct Limits { /// Maximum allowed stack height in number of elements. /// - /// See https://wiki.parity.io/WebAssembly-StackHeight to find out + /// See to find out /// how the stack frame cost is calculated. Each element can be of one of the /// wasm value types. This means the maximum size per element is 64bit. pub stack_height: u32, @@ -131,7 +131,7 @@ pub struct Limits { /// and dropping return values in order to maintain a valid module. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Clone, Encode, Decode, PartialEq, Eq, WeightDebug)] -pub struct InstructionWeights { +pub struct InstructionWeights { pub i64const: u32, pub i64load: u32, pub i64store: u32, @@ -190,7 +190,7 @@ pub struct InstructionWeights { /// Describes the weight for each imported function that a contract is allowed to call. 
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Clone, Encode, Decode, PartialEq, Eq, WeightDebug)] -pub struct HostFnWeights { +pub struct HostFnWeights { /// Weight of calling `seal_caller`. pub caller: Weight, @@ -302,6 +302,9 @@ pub struct HostFnWeights { /// Weight per output byte received through `seal_instantiate`. pub instantiate_per_output_byte: Weight, + /// Weight per salt byte supplied to `seal_instantiate`. + pub instantiate_per_salt_byte: Weight, + /// Weight of calling `seal_hash_sha_256`. pub hash_sha2_256: Weight, @@ -407,7 +410,7 @@ macro_rules! cost_byte_batched { } } -impl Default for Schedule { +impl Default for Schedule { fn default() -> Self { Self { version: 0, @@ -437,7 +440,7 @@ impl Default for Limits { } } -impl Default for InstructionWeights { +impl Default for InstructionWeights { fn default() -> Self { let max_pages = Limits::default().memory_pages; Self { @@ -497,7 +500,7 @@ impl Default for InstructionWeights { } } -impl Default for HostFnWeights { +impl Default for HostFnWeights { fn default() -> Self { Self { caller: cost_batched!(seal_caller), @@ -535,8 +538,9 @@ impl Default for HostFnWeights { call_per_input_byte: cost_byte_batched_args!(seal_call_per_transfer_input_output_kb, 0, 1, 0), call_per_output_byte: cost_byte_batched_args!(seal_call_per_transfer_input_output_kb, 0, 0, 1), instantiate: cost_batched!(seal_instantiate), - instantiate_per_input_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_kb, 1, 0), - instantiate_per_output_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_kb, 0, 1), + instantiate_per_input_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_salt_kb, 1, 0, 0), + instantiate_per_output_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_salt_kb, 0, 1, 0), + instantiate_per_salt_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_salt_kb, 0, 0, 1), hash_sha2_256: cost_batched!(seal_hash_sha2_256), hash_sha2_256_per_byte: cost_byte_batched!(seal_hash_sha2_256_per_kb), hash_keccak_256: cost_batched!(seal_hash_keccak_256), @@ -550,12 +554,12 @@ impl Default for HostFnWeights { } } -struct ScheduleRules<'a, T: Trait> { +struct ScheduleRules<'a, T: Config> { schedule: &'a Schedule, params: Vec, } -impl Schedule { +impl Schedule { pub fn rules(&self, module: &elements::Module) -> impl rules::Rules + '_ { ScheduleRules { schedule: &self, @@ -572,7 +576,7 @@ impl Schedule { } } -impl<'a, T: Trait> rules::Rules for ScheduleRules<'a, T> { +impl<'a, T: Config> rules::Rules for ScheduleRules<'a, T> { fn instruction_cost(&self, instruction: &elements::Instruction) -> Option { use parity_wasm::elements::Instruction::*; let w = &self.schedule.instruction_weights; diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index 3740952778fd33d85b53f3e2bca8a6f5bef64881..ba09adb285b938fd8259ecd7ade37b70c9cf4be1 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -18,11 +18,14 @@ use crate::{ exec::{AccountIdOf, StorageKey}, - AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Trait, TrieId, + AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Config, TrieId, + AccountCounter, }; use sp_std::prelude::*; +use sp_std::marker::PhantomData; use sp_io::hashing::blake2_256; use sp_runtime::traits::Bounded; +use sp_core::crypto::UncheckedFrom; use frame_support::{storage::child, StorageMap}; /// An error that means that the account requested either doesn't exist or represents a 
tombstone @@ -30,167 +33,196 @@ use frame_support::{storage::child, StorageMap}; #[cfg_attr(test, derive(PartialEq, Eq, Debug))] pub struct ContractAbsentError; -/// Reads a storage kv pair of a contract. -/// -/// The read is performed from the `trie_id` only. The `address` is not necessary. If the contract -/// doesn't store under the given `key` `None` is returned. -pub fn read_contract_storage(trie_id: &TrieId, key: &StorageKey) -> Option> { - child::get_raw(&crate::child_trie_info(&trie_id), &blake2_256(key)) -} - -/// Update a storage entry into a contract's kv storage. -/// -/// If the `opt_new_value` is `None` then the kv pair is removed. -/// -/// This function also updates the bookkeeping info such as: number of total non-empty pairs a -/// contract owns, the last block the storage was written to, etc. That's why, in contrast to -/// `read_contract_storage`, this function also requires the `account` ID. -/// -/// If the contract specified by the id `account` doesn't exist `Err` is returned.` -pub fn write_contract_storage( - account: &AccountIdOf, - trie_id: &TrieId, - key: &StorageKey, - opt_new_value: Option>, -) -> Result<(), ContractAbsentError> { - let mut new_info = match >::get(account) { - Some(ContractInfo::Alive(alive)) => alive, - None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAbsentError), - }; - - let hashed_key = blake2_256(key); - let child_trie_info = &crate::child_trie_info(&trie_id); - - // In order to correctly update the book keeping we need to fetch the previous - // value of the key-value pair. - // - // It might be a bit more clean if we had an API that supported getting the size - // of the value without going through the loading of it. But at the moment of - // writing, there is no such API. - // - // That's not a show stopper in any case, since the performance cost is - // dominated by the trie traversal anyway. - let opt_prev_value = child::get_raw(&child_trie_info, &hashed_key); - - // Update the total number of KV pairs and the number of empty pairs. - match (&opt_prev_value, &opt_new_value) { - (Some(prev_value), None) => { - new_info.total_pair_count -= 1; - if prev_value.is_empty() { - new_info.empty_pair_count -= 1; - } - }, - (None, Some(new_value)) => { - new_info.total_pair_count += 1; - if new_value.is_empty() { - new_info.empty_pair_count += 1; - } - }, - (Some(prev_value), Some(new_value)) => { - if prev_value.is_empty() { - new_info.empty_pair_count -= 1; - } - if new_value.is_empty() { - new_info.empty_pair_count += 1; +pub struct Storage(PhantomData); + +impl Storage +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ + /// Reads a storage kv pair of a contract. + /// + /// The read is performed from the `trie_id` only. The `address` is not necessary. If the contract + /// doesn't store under the given `key` `None` is returned. + pub fn read(trie_id: &TrieId, key: &StorageKey) -> Option> { + child::get_raw(&crate::child_trie_info(&trie_id), &blake2_256(key)) + } + + /// Update a storage entry into a contract's kv storage. + /// + /// If the `opt_new_value` is `None` then the kv pair is removed. + /// + /// This function also updates the bookkeeping info such as: number of total non-empty pairs a + /// contract owns, the last block the storage was written to, etc. That's why, in contrast to + /// `read`, this function also requires the `account` ID. 
+ /// + /// If the contract specified by the id `account` doesn't exist `Err` is returned.` + pub fn write( + account: &AccountIdOf, + trie_id: &TrieId, + key: &StorageKey, + opt_new_value: Option>, + ) -> Result<(), ContractAbsentError> { + let mut new_info = match >::get(account) { + Some(ContractInfo::Alive(alive)) => alive, + None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAbsentError), + }; + + let hashed_key = blake2_256(key); + let child_trie_info = &crate::child_trie_info(&trie_id); + + // In order to correctly update the book keeping we need to fetch the previous + // value of the key-value pair. + // + // It might be a bit more clean if we had an API that supported getting the size + // of the value without going through the loading of it. But at the moment of + // writing, there is no such API. + // + // That's not a show stopper in any case, since the performance cost is + // dominated by the trie traversal anyway. + let opt_prev_value = child::get_raw(&child_trie_info, &hashed_key); + + // Update the total number of KV pairs and the number of empty pairs. + match (&opt_prev_value, &opt_new_value) { + (Some(prev_value), None) => { + new_info.total_pair_count -= 1; + if prev_value.is_empty() { + new_info.empty_pair_count -= 1; + } + }, + (None, Some(new_value)) => { + new_info.total_pair_count += 1; + if new_value.is_empty() { + new_info.empty_pair_count += 1; + } + }, + (Some(prev_value), Some(new_value)) => { + if prev_value.is_empty() { + new_info.empty_pair_count -= 1; + } + if new_value.is_empty() { + new_info.empty_pair_count += 1; + } } + (None, None) => {} + } + + // Update the total storage size. + let prev_value_len = opt_prev_value + .as_ref() + .map(|old_value| old_value.len() as u32) + .unwrap_or(0); + let new_value_len = opt_new_value + .as_ref() + .map(|new_value| new_value.len() as u32) + .unwrap_or(0); + new_info.storage_size = new_info + .storage_size + .saturating_add(new_value_len) + .saturating_sub(prev_value_len); + + new_info.last_write = Some(>::block_number()); + >::insert(&account, ContractInfo::Alive(new_info)); + + // Finally, perform the change on the storage. + match opt_new_value { + Some(new_value) => child::put_raw(&child_trie_info, &hashed_key, &new_value[..]), + None => child::kill(&child_trie_info, &hashed_key), } - (None, None) => {} + + Ok(()) } - // Update the total storage size. - let prev_value_len = opt_prev_value - .as_ref() - .map(|old_value| old_value.len() as u32) - .unwrap_or(0); - let new_value_len = opt_new_value - .as_ref() - .map(|new_value| new_value.len() as u32) - .unwrap_or(0); - new_info.storage_size = new_info - .storage_size - .saturating_add(new_value_len) - .saturating_sub(prev_value_len); - - new_info.last_write = Some(>::block_number()); - >::insert(&account, ContractInfo::Alive(new_info)); - - // Finally, perform the change on the storage. - match opt_new_value { - Some(new_value) => child::put_raw(&child_trie_info, &hashed_key, &new_value[..]), - None => child::kill(&child_trie_info, &hashed_key), + /// Returns the rent allowance set for the contract give by the account id. + pub fn rent_allowance( + account: &AccountIdOf, + ) -> Result, ContractAbsentError> + { + >::get(account) + .and_then(|i| i.as_alive().map(|i| i.rent_allowance)) + .ok_or(ContractAbsentError) } - Ok(()) -} - -/// Returns the rent allowance set for the contract give by the account id. 
-pub fn rent_allowance( - account: &AccountIdOf, -) -> Result, ContractAbsentError> { - >::get(account) - .and_then(|i| i.as_alive().map(|i| i.rent_allowance)) - .ok_or(ContractAbsentError) -} - -/// Set the rent allowance for the contract given by the account id. -/// -/// Returns `Err` if the contract doesn't exist or is a tombstone. -pub fn set_rent_allowance( - account: &AccountIdOf, - rent_allowance: BalanceOf, -) -> Result<(), ContractAbsentError> { - >::mutate(account, |maybe_contract_info| match maybe_contract_info { - Some(ContractInfo::Alive(ref mut alive_info)) => { - alive_info.rent_allowance = rent_allowance; - Ok(()) - } - _ => Err(ContractAbsentError), - }) -} - -/// Returns the code hash of the contract specified by `account` ID. -#[cfg(test)] -pub fn code_hash(account: &AccountIdOf) -> Result, ContractAbsentError> { - >::get(account) - .and_then(|i| i.as_alive().map(|i| i.code_hash)) - .ok_or(ContractAbsentError) -} - -/// Creates a new contract descriptor in the storage with the given code hash at the given address. -/// -/// Returns `Err` if there is already a contract (or a tombstone) exists at the given address. -pub fn place_contract( - account: &AccountIdOf, - trie_id: TrieId, - ch: CodeHash, -) -> Result<(), &'static str> { - >::mutate(account, |maybe_contract_info| { - if maybe_contract_info.is_some() { - return Err("Alive contract or tombstone already exists"); - } + /// Set the rent allowance for the contract given by the account id. + /// + /// Returns `Err` if the contract doesn't exist or is a tombstone. + pub fn set_rent_allowance( + account: &AccountIdOf, + rent_allowance: BalanceOf, + ) -> Result<(), ContractAbsentError> { + >::mutate(account, |maybe_contract_info| match maybe_contract_info { + Some(ContractInfo::Alive(ref mut alive_info)) => { + alive_info.rent_allowance = rent_allowance; + Ok(()) + } + _ => Err(ContractAbsentError), + }) + } - *maybe_contract_info = Some( - AliveContractInfo:: { - code_hash: ch, - storage_size: 0, - trie_id, - deduct_block: >::block_number(), - rent_allowance: >::max_value(), - empty_pair_count: 0, - total_pair_count: 0, - last_write: None, + /// Creates a new contract descriptor in the storage with the given code hash at the given address. + /// + /// Returns `Err` if there is already a contract (or a tombstone) exists at the given address. + pub fn place_contract( + account: &AccountIdOf, + trie_id: TrieId, + ch: CodeHash, + ) -> Result<(), &'static str> { + >::mutate(account, |maybe_contract_info| { + if maybe_contract_info.is_some() { + return Err("Alive contract or tombstone already exists"); } - .into(), - ); - Ok(()) - }) -} - -/// Removes the contract and all the storage associated with it. -/// -/// This function doesn't affect the account. -pub fn destroy_contract(address: &AccountIdOf, trie_id: &TrieId) { - >::remove(address); - child::kill_storage(&crate::child_trie_info(&trie_id)); -} + *maybe_contract_info = Some( + AliveContractInfo:: { + code_hash: ch, + storage_size: 0, + trie_id, + deduct_block: >::block_number(), + rent_allowance: >::max_value(), + empty_pair_count: 0, + total_pair_count: 0, + last_write: None, + } + .into(), + ); + + Ok(()) + }) + } + + /// Removes the contract and all the storage associated with it. + /// + /// This function doesn't affect the account. 
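// The rule `write` implements above, restated as arithmetic: the cached
// `storage_size` moves by the difference between the new and the previous
// value lengths, using saturating ops so it cannot underflow. The helper and
// names below are illustrative only.
fn updated_storage_size(current: u32, prev_len: Option<u32>, new_len: Option<u32>) -> u32 {
    current
        .saturating_add(new_len.unwrap_or(0))
        .saturating_sub(prev_len.unwrap_or(0))
}
// Overwriting a 10-byte value with a 4-byte one therefore shrinks the size:
// updated_storage_size(100, Some(10), Some(4)) == 94.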
+ pub fn destroy_contract(address: &AccountIdOf, trie_id: &TrieId) { + >::remove(address); + child::kill_storage(&crate::child_trie_info(&trie_id), None); + } + + /// This generator uses inner counter for account id and applies the hash over `AccountId + + /// accountid_counter`. + pub fn generate_trie_id(account_id: &AccountIdOf) -> TrieId { + use frame_support::StorageValue; + use sp_runtime::traits::Hash; + // Note that skipping a value due to error is not an issue here. + // We only need uniqueness, not sequence. + let new_seed = AccountCounter::mutate(|v| { + *v = v.wrapping_add(1); + *v + }); + + let buf: Vec<_> = account_id.as_ref().iter() + .chain(&new_seed.to_le_bytes()) + .cloned() + .collect(); + T::Hashing::hash(&buf).as_ref().into() + } + + /// Returns the code hash of the contract specified by `account` ID. + #[cfg(test)] + pub fn code_hash(account: &AccountIdOf) -> Result, ContractAbsentError> + { + >::get(account) + .and_then(|i| i.as_alive().map(|i| i.code_hash)) + .ok_or(ContractAbsentError) + } +} \ No newline at end of file diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 05e46a3ab1585bd1cbb991e11b6090e563c6206f..c0b9b671068d6705e5375b15af41891b86a49f9e 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -15,21 +15,21 @@ // along with Substrate. If not, see . use crate::{ - BalanceOf, ContractAddressFor, ContractInfo, ContractInfoOf, GenesisConfig, Module, - RawAliveContractInfo, RawEvent, Trait, TrieId, Schedule, TrieIdGenerator, gas::Gas, - Error, Config, RuntimeReturnCode, + BalanceOf, ContractInfo, ContractInfoOf, GenesisConfig, Module, + RawAliveContractInfo, RawEvent, Config, Schedule, gas::Gas, + Error, ConfigCache, RuntimeReturnCode, storage::Storage, + exec::AccountIdOf, }; use assert_matches::assert_matches; -use hex_literal::*; use codec::Encode; use sp_runtime::{ - Perbill, traits::{BlakeTwo256, Hash, IdentityLookup, Convert}, testing::{Header, H256}, + AccountId32, }; use frame_support::{ assert_ok, assert_err_ignore_postinfo, impl_outer_dispatch, impl_outer_event, - impl_outer_origin, parameter_types, StorageMap, StorageValue, + impl_outer_origin, parameter_types, StorageMap, traits::{Currency, ReservableCurrency}, weights::{Weight, PostDispatchInfo}, dispatch::DispatchErrorWithPostInfo, @@ -66,28 +66,30 @@ impl_outer_dispatch! 
{ #[macro_use] pub mod test_utils { use super::{Test, Balances}; - use crate::{ContractInfoOf, TrieIdGenerator, CodeHash}; - use crate::storage::{write_contract_storage, read_contract_storage}; - use crate::exec::StorageKey; + use crate::{ + ContractInfoOf, CodeHash, + storage::Storage, + exec::{StorageKey, AccountIdOf}, + }; use frame_support::{StorageMap, traits::Currency}; - pub fn set_storage(addr: &u64, key: &StorageKey, value: Option>) { + pub fn set_storage(addr: &AccountIdOf, key: &StorageKey, value: Option>) { let contract_info = >::get(&addr).unwrap().get_alive().unwrap(); - write_contract_storage::(&1, &contract_info.trie_id, key, value).unwrap(); + Storage::::write(addr, &contract_info.trie_id, key, value).unwrap(); } - pub fn get_storage(addr: &u64, key: &StorageKey) -> Option> { + pub fn get_storage(addr: &AccountIdOf, key: &StorageKey) -> Option> { let contract_info = >::get(&addr).unwrap().get_alive().unwrap(); - read_contract_storage(&contract_info.trie_id, key) + Storage::::read(&contract_info.trie_id, key) } - pub fn place_contract(address: &u64, code_hash: CodeHash) { - let trie_id = ::TrieIdGenerator::trie_id(address); - crate::storage::place_contract::(&address, trie_id, code_hash).unwrap() + pub fn place_contract(address: &AccountIdOf, code_hash: CodeHash) { + let trie_id = Storage::::generate_trie_id(address); + Storage::::place_contract(&address, trie_id, code_hash).unwrap() } - pub fn set_balance(who: &u64, amount: u64) { + pub fn set_balance(who: &AccountIdOf, amount: u64) { let imbalance = Balances::deposit_creating(who, amount); drop(imbalance); } - pub fn get_balance(who: &u64) -> u64 { + pub fn get_balance(who: &AccountIdOf) -> u64 { Balances::free_balance(who) } macro_rules! assert_return_code { @@ -102,31 +104,26 @@ pub mod test_utils { pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); pub static ExistentialDeposit: u64 = 0; } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; type Call = Call; type Hashing = BlakeTwo256; - type AccountId = u64; + type AccountId = AccountId32; type Lookup = IdentityLookup; type Header = Header; type Event = MetaEvent; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -134,7 +131,7 @@ impl frame_system::Trait for Test { type OnKilledAccount = (); type SystemWeightInfo = (); } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type Event = MetaEvent; @@ -146,7 +143,7 @@ impl pallet_balances::Trait for Test { parameter_types! 
{ pub const MinimumPeriod: u64 = 1; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; @@ -173,13 +170,11 @@ impl Convert> for Test { } } -impl Trait for Test { +impl Config for Test { type Time = Timestamp; type Randomness = Randomness; type Currency = Balances; - type DetermineContractAddress = DummyContractAddressFor; type Event = MetaEvent; - type TrieIdGenerator = DummyTrieIdGenerator; type RentPayment = (); type SignedClaimHandicap = SignedClaimHandicap; type TombstoneDeposit = TombstoneDeposit; @@ -199,32 +194,10 @@ type Contracts = Module; type System = frame_system::Module; type Randomness = pallet_randomness_collective_flip::Module; -pub struct DummyContractAddressFor; -impl ContractAddressFor for DummyContractAddressFor { - fn contract_address_for(_code_hash: &H256, _data: &[u8], origin: &u64) -> u64 { - *origin + 1 - } -} - -pub struct DummyTrieIdGenerator; -impl TrieIdGenerator for DummyTrieIdGenerator { - fn trie_id(account_id: &u64) -> TrieId { - let new_seed = super::AccountCounter::mutate(|v| { - *v = v.wrapping_add(1); - *v - }); - - let mut res = vec![]; - res.extend_from_slice(&new_seed.to_le_bytes()); - res.extend_from_slice(&account_id.to_le_bytes()); - res - } -} - -const ALICE: u64 = 1; -const BOB: u64 = 2; -const CHARLIE: u64 = 3; -const DJANGO: u64 = 4; +pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]); +pub const BOB: AccountId32 = AccountId32::new([2u8; 32]); +pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]); +pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]); const GAS_LIMIT: Gas = 10_000_000_000; @@ -272,7 +245,7 @@ fn compile_module( fixture_name: &str, ) -> wat::Result<(Vec, ::Output)> where - T: frame_system::Trait, + T: frame_system::Config, { let fixture_path = ["fixtures/", fixture_name, ".wat"].concat(); let wasm_binary = wat::parse_file(fixture_path)?; @@ -309,8 +282,8 @@ fn account_removal_does_not_remove_storage() { use self::test_utils::{set_storage, get_storage}; ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let trie_id1 = ::TrieIdGenerator::trie_id(&1); - let trie_id2 = ::TrieIdGenerator::trie_id(&2); + let trie_id1 = Storage::::generate_trie_id(&ALICE); + let trie_id2 = Storage::::generate_trie_id(&BOB); let key1 = &[1; 32]; let key2 = &[2; 32]; @@ -388,7 +361,7 @@ fn instantiate_and_call_and_deposit_event() { .build() .execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let subsistence = super::Config::::subsistence_threshold_uncached(); + let subsistence = super::ConfigCache::::subsistence_threshold_uncached(); assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); @@ -399,17 +372,21 @@ fn instantiate_and_call_and_deposit_event() { GAS_LIMIT, code_hash.into(), vec![], + vec![], ); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); pretty_assertions::assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), + event: MetaEvent::system(frame_system::RawEvent::NewAccount(ALICE.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), + event: MetaEvent::balances( + pallet_balances::RawEvent::Endowed(ALICE, 1_000_000) + ), topics: vec![], }, EventRecord { @@ -419,37 +396,39 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: 
Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(BOB)), + event: MetaEvent::system(frame_system::RawEvent::NewAccount(addr.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: MetaEvent::balances( - pallet_balances::RawEvent::Endowed(BOB, subsistence) + pallet_balances::RawEvent::Endowed(addr.clone(), subsistence) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: MetaEvent::balances( - pallet_balances::RawEvent::Transfer(ALICE, BOB, subsistence) + pallet_balances::RawEvent::Transfer(ALICE, addr.clone(), subsistence) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::ContractExecution(BOB, vec![1, 2, 3, 4])), + event: MetaEvent::contracts( + RawEvent::ContractExecution(addr.clone(), vec![1, 2, 3, 4]) + ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Instantiated(ALICE, BOB)), + event: MetaEvent::contracts(RawEvent::Instantiated(ALICE, addr.clone())), topics: vec![], } ]); assert_ok!(creation); - assert!(ContractInfoOf::::contains_key(BOB)); + assert!(ContractInfoOf::::contains_key(&addr)); }); } @@ -470,29 +449,34 @@ fn deposit_event_max_value_limit() { GAS_LIMIT, code_hash.into(), vec![], + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check creation - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); + let bob_contract = ContractInfoOf::::get(addr.clone()) + .unwrap() + .get_alive() + .unwrap(); assert_eq!(bob_contract.rent_allowance, >::max_value()); // Call contract with allowed storage value. assert_ok!(Contracts::call( Origin::signed(ALICE), - BOB, + addr.clone(), 0, GAS_LIMIT * 2, // we are copying a huge buffer, - ::MaxValueSize::get().encode(), + ::MaxValueSize::get().encode(), )); // Call contract with too large a storage value. assert_err_ignore_postinfo!( Contracts::call( Origin::signed(ALICE), - BOB, + addr, 0, GAS_LIMIT, - (::MaxValueSize::get() + 1).encode(), + (::MaxValueSize::get() + 1).encode(), ), Error::::ValueTooLarge, ); @@ -517,14 +501,16 @@ fn run_out_of_gas() { GAS_LIMIT, code_hash.into(), vec![], + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Call the contract with a fixed gas limit. It must run out of gas because it just // loops forever. assert_err_ignore_postinfo!( Contracts::call( Origin::signed(ALICE), - BOB, // newly created account + addr, // newly created account 0, 67_500_000, vec![], @@ -536,21 +522,19 @@ fn run_out_of_gas() { /// Input data for each call in set_rent code mod call { - pub fn set_storage_4_byte() -> Vec { vec![] } - pub fn remove_storage_4_byte() -> Vec { vec![0] } - pub fn transfer() -> Vec { vec![0, 0] } - pub fn null() -> Vec { vec![0, 0, 0] } + use super::{AccountIdOf, Test}; + pub fn set_storage_4_byte() -> Vec { 0u32.to_le_bytes().to_vec() } + pub fn remove_storage_4_byte() -> Vec { 1u32.to_le_bytes().to_vec() } + pub fn transfer(to: &AccountIdOf) -> Vec { + 2u32.to_le_bytes().iter().chain(AsRef::<[u8]>::as_ref(to)).cloned().collect() + } + pub fn null() -> Vec { 3u32.to_le_bytes().to_vec() } } /// Test correspondence of set_rent code and its hash. /// Also test that encoded extrinsic in code correspond to the correct transfer #[test] fn test_set_rent_code_and_hash() { - // This test can fail due to the encoding changes. 
In case it becomes too annoying - // let's rewrite so as we use this module controlled call or we serialize it in runtime. - let encoded = Encode::encode(&Call::Balances(pallet_balances::Call::transfer(CHARLIE, 50))); - assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); - let (wasm, code_hash) = compile_module::("set_rent").unwrap(); ExtBuilder::default() @@ -565,12 +549,14 @@ fn test_set_rent_code_and_hash() { assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), + event: MetaEvent::system(frame_system::RawEvent::NewAccount(ALICE)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed( + ALICE, 1_000_000 + )), topics: vec![], }, EventRecord { @@ -599,9 +585,11 @@ fn storage_size() { 30_000, GAS_LIMIT, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance + ::Balance::from(1_000u32).encode(), // rent allowance + vec![], )); - let bob_contract = ContractInfoOf::::get(BOB) + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let bob_contract = ContractInfoOf::::get(&addr) .unwrap() .get_alive() .unwrap(); @@ -620,12 +608,12 @@ fn storage_size() { assert_ok!(Contracts::call( Origin::signed(ALICE), - BOB, + addr.clone(), 0, GAS_LIMIT, call::set_storage_4_byte() )); - let bob_contract = ContractInfoOf::::get(BOB) + let bob_contract = ContractInfoOf::::get(&addr) .unwrap() .get_alive() .unwrap(); @@ -644,12 +632,12 @@ fn storage_size() { assert_ok!(Contracts::call( Origin::signed(ALICE), - BOB, + addr.clone(), 0, GAS_LIMIT, call::remove_storage_4_byte() )); - let bob_contract = ContractInfoOf::::get(BOB) + let bob_contract = ContractInfoOf::::get(&addr) .unwrap() .get_alive() .unwrap(); @@ -683,8 +671,10 @@ fn empty_kv_pairs() { GAS_LIMIT, code_hash.into(), vec![], + vec![], )); - let bob_contract = ContractInfoOf::::get(BOB) + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let bob_contract = ContractInfoOf::::get(&addr) .unwrap() .get_alive() .unwrap(); @@ -729,90 +719,98 @@ fn deduct_blocks() { Origin::signed(ALICE), 30_000, GAS_LIMIT, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance + ::Balance::from(1_000u32).encode(), // rent allowance + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check creation - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); + let bob_contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); assert_eq!(bob_contract.rent_allowance, 1_000); // Advance 4 blocks initialize_block(5); // Trigger rent through call - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null())); + assert_ok!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()) + ); // Check result let rent = (8 + 4 - 3) // storage size = size_offset + deploy_set_storage - deposit_offset * 4 // rent byte price * 4; // blocks to rent - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); + let bob_contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); assert_eq!(bob_contract.rent_allowance, 1_000 - rent); assert_eq!(bob_contract.deduct_block, 5); - assert_eq!(Balances::free_balance(BOB), 30_000 - rent); + assert_eq!(Balances::free_balance(&addr), 30_000 - rent); // Advance 7 blocks more initialize_block(12); // 
Trigger rent through call - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null())); + assert_ok!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()) + ); // Check result let rent_2 = (8 + 4 - 2) // storage size = size_offset + deploy_set_storage - deposit_offset * 4 // rent byte price * 7; // blocks to rent - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); + let bob_contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); assert_eq!(bob_contract.rent_allowance, 1_000 - rent - rent_2); assert_eq!(bob_contract.deduct_block, 12); - assert_eq!(Balances::free_balance(BOB), 30_000 - rent - rent_2); + assert_eq!(Balances::free_balance(&addr), 30_000 - rent - rent_2); // Second call on same block should have no effect on rent - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null())); + assert_ok!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()) + ); - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); + let bob_contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); assert_eq!(bob_contract.rent_allowance, 1_000 - rent - rent_2); assert_eq!(bob_contract.deduct_block, 12); - assert_eq!(Balances::free_balance(BOB), 30_000 - rent - rent_2); + assert_eq!(Balances::free_balance(&addr), 30_000 - rent - rent_2); }); } #[test] fn call_contract_removals() { - removals(|| { + removals(|addr| { // Call on already-removed account might fail, and this is fine. - let _ = Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null()); + let _ = Contracts::call(Origin::signed(ALICE), addr, 0, GAS_LIMIT, call::null()); true }); } #[test] fn inherent_claim_surcharge_contract_removals() { - removals(|| Contracts::claim_surcharge(Origin::none(), BOB, Some(ALICE)).is_ok()); + removals(|addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok()); } #[test] fn signed_claim_surcharge_contract_removals() { - removals(|| Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok()); + removals(|addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok()); } #[test] fn claim_surcharge_malus() { // Test surcharge malus for inherent - claim_surcharge(4, || Contracts::claim_surcharge(Origin::none(), BOB, Some(ALICE)).is_ok(), true); - claim_surcharge(3, || Contracts::claim_surcharge(Origin::none(), BOB, Some(ALICE)).is_ok(), true); - claim_surcharge(2, || Contracts::claim_surcharge(Origin::none(), BOB, Some(ALICE)).is_ok(), true); - claim_surcharge(1, || Contracts::claim_surcharge(Origin::none(), BOB, Some(ALICE)).is_ok(), false); + claim_surcharge(4, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), true); + claim_surcharge(3, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), true); + claim_surcharge(2, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), true); + claim_surcharge(1, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), false); // Test surcharge malus for signed - claim_surcharge(4, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), true); - claim_surcharge(3, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), false); - claim_surcharge(2, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), false); - claim_surcharge(1, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), 
false); + claim_surcharge(4, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), true); + claim_surcharge(3, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), false); + claim_surcharge(2, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), false); + claim_surcharge(1, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), false); } /// Claim surcharge with the given trigger_call at the given blocks. /// If `removes` is true then assert that the contract is a tombstone. -fn claim_surcharge(blocks: u64, trigger_call: impl Fn() -> bool, removes: bool) { +fn claim_surcharge(blocks: u64, trigger_call: impl Fn(AccountIdOf) -> bool, removes: bool) { let (wasm, code_hash) = compile_module::("set_rent").unwrap(); ExtBuilder::default() @@ -826,19 +824,21 @@ fn claim_surcharge(blocks: u64, trigger_call: impl Fn() -> bool, removes: bool) Origin::signed(ALICE), 100, GAS_LIMIT, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance + ::Balance::from(1_000u32).encode(), // rent allowance + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Advance blocks initialize_block(blocks); // Trigger rent through call - assert!(trigger_call()); + assert!(trigger_call(addr.clone())); if removes { - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); + assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); } else { - assert!(ContractInfoOf::::get(BOB).unwrap().get_alive().is_some()); + assert!(ContractInfoOf::::get(&addr).unwrap().get_alive().is_some()); } }); } @@ -848,7 +848,7 @@ fn claim_surcharge(blocks: u64, trigger_call: impl Fn() -> bool, removes: bool) /// * if allowance is exceeded /// * if balance is reached and balance < subsistence threshold /// * this case cannot be triggered by a contract: we check whether a tombstone is left -fn removals(trigger_call: impl Fn() -> bool) { +fn removals(trigger_call: impl Fn(AccountIdOf) -> bool) { let (wasm, code_hash) = compile_module::("set_rent").unwrap(); // Balance reached and superior to subsistence threshold @@ -863,31 +863,33 @@ fn removals(trigger_call: impl Fn() -> bool) { Origin::signed(ALICE), 100, GAS_LIMIT, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance + ::Balance::from(1_000u32).encode(), // rent allowance + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); let subsistence_threshold = 50 /*existential_deposit*/ + 16 /*tombstone_deposit*/; // Trigger rent must have no effect - assert!(trigger_call()); - assert_eq!(ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap().rent_allowance, 1_000); - assert_eq!(Balances::free_balance(BOB), 100); + assert!(trigger_call(addr.clone())); + assert_eq!(ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap().rent_allowance, 1_000); + assert_eq!(Balances::free_balance(&addr), 100); // Advance blocks initialize_block(10); // Trigger rent through call - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); + assert!(trigger_call(addr.clone())); + assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); + assert_eq!(Balances::free_balance(&addr), subsistence_threshold); // Advance blocks initialize_block(20); // Trigger rent must have no effect - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - 
assert_eq!(Balances::free_balance(BOB), subsistence_threshold); + assert!(trigger_call(addr.clone())); + assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); + assert_eq!(Balances::free_balance(&addr), subsistence_threshold); }); // Allowance exceeded @@ -903,43 +905,45 @@ fn removals(trigger_call: impl Fn() -> bool) { 1_000, GAS_LIMIT, code_hash.into(), - ::Balance::from(100u32).encode() // rent allowance + ::Balance::from(100u32).encode(), // rent allowance + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Trigger rent must have no effect - assert!(trigger_call()); + assert!(trigger_call(addr.clone())); assert_eq!( - ContractInfoOf::::get(BOB) + ContractInfoOf::::get(&addr) .unwrap() .get_alive() .unwrap() .rent_allowance, 100 ); - assert_eq!(Balances::free_balance(BOB), 1_000); + assert_eq!(Balances::free_balance(&addr), 1_000); // Advance blocks initialize_block(10); // Trigger rent through call - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB) + assert!(trigger_call(addr.clone())); + assert!(ContractInfoOf::::get(&addr) .unwrap() .get_tombstone() .is_some()); // Balance should be initial balance - initial rent_allowance - assert_eq!(Balances::free_balance(BOB), 900); + assert_eq!(Balances::free_balance(&addr), 900); // Advance blocks initialize_block(20); // Trigger rent must have no effect - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB) + assert!(trigger_call(addr.clone())); + assert!(ContractInfoOf::::get(&addr) .unwrap() .get_tombstone() .is_some()); - assert_eq!(Balances::free_balance(BOB), 900); + assert_eq!(Balances::free_balance(&addr), 900); }); // Balance reached and inferior to subsistence threshold @@ -950,20 +954,22 @@ fn removals(trigger_call: impl Fn() -> bool) { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); let subsistence_threshold = - Balances::minimum_balance() + ::TombstoneDeposit::get(); + Balances::minimum_balance() + ::TombstoneDeposit::get(); assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm.clone())); assert_ok!(Contracts::instantiate( Origin::signed(ALICE), 50 + subsistence_threshold, GAS_LIMIT, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance + ::Balance::from(1_000u32).encode(), // rent allowance + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Trigger rent must have no effect - assert!(trigger_call()); + assert!(trigger_call(addr.clone())); assert_eq!( - ContractInfoOf::::get(BOB) + ContractInfoOf::::get(&addr) .unwrap() .get_alive() .unwrap() @@ -971,43 +977,43 @@ fn removals(trigger_call: impl Fn() -> bool) { 1_000 ); assert_eq!( - Balances::free_balance(BOB), + Balances::free_balance(&addr), 50 + subsistence_threshold, ); // Transfer funds assert_ok!(Contracts::call( Origin::signed(ALICE), - BOB, + addr.clone(), 0, GAS_LIMIT, - call::transfer() + call::transfer(&BOB), )); assert_eq!( - ContractInfoOf::::get(BOB) + ContractInfoOf::::get(&addr) .unwrap() .get_alive() .unwrap() .rent_allowance, 1_000 ); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); + assert_eq!(Balances::free_balance(&addr), subsistence_threshold); // Advance blocks initialize_block(10); // Trigger rent through call - assert!(trigger_call()); - assert_matches!(ContractInfoOf::::get(BOB), Some(ContractInfo::Tombstone(_))); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); + assert!(trigger_call(addr.clone())); + assert_matches!(ContractInfoOf::::get(&addr), 
Some(ContractInfo::Tombstone(_))); + assert_eq!(Balances::free_balance(&addr), subsistence_threshold); // Advance blocks initialize_block(20); // Trigger rent must have no effect - assert!(trigger_call()); - assert_matches!(ContractInfoOf::::get(BOB), Some(ContractInfo::Tombstone(_))); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); + assert!(trigger_call(addr.clone())); + assert_matches!(ContractInfoOf::::get(&addr), Some(ContractInfo::Tombstone(_))); + assert_eq!(Balances::free_balance(&addr), subsistence_threshold); }); } @@ -1027,32 +1033,36 @@ fn call_removed_contract() { Origin::signed(ALICE), 100, GAS_LIMIT, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance + ::Balance::from(1_000u32).encode(), // rent allowance + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Calling contract should succeed. - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null())); + assert_ok!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()) + ); // Advance blocks initialize_block(10); // Calling contract should remove contract and fail. assert_err_ignore_postinfo!( - Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null()), + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()), Error::::NotCallable ); // Calling a contract that is about to evict shall emit an event. assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Evicted(BOB, true)), + event: MetaEvent::contracts(RawEvent::Evicted(addr.clone(), true)), topics: vec![], }, ]); // Subsequent contract calls should also fail. assert_err_ignore_postinfo!( - Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null()), + Contracts::call(Origin::signed(ALICE), addr, 0, GAS_LIMIT, call::null()), Error::::NotCallable ); }) @@ -1075,20 +1085,24 @@ fn default_rent_allowance_on_instantiate() { GAS_LIMIT, code_hash.into(), vec![], + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check creation - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); + let bob_contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); assert_eq!(bob_contract.rent_allowance, >::max_value()); // Advance blocks initialize_block(5); // Trigger rent through call - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null())); + assert_ok!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()) + ); // Check contract is still alive - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive(); + let bob_contract = ContractInfoOf::::get(&addr).unwrap().get_alive(); assert!(bob_contract.is_some()) }); } @@ -1130,12 +1144,12 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), + event: MetaEvent::system(frame_system::RawEvent::NewAccount(ALICE)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(ALICE, 1_000_000)), topics: vec![], }, EventRecord { @@ -1157,18 +1171,20 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: 30_000, GAS_LIMIT, 
set_rent_code_hash.into(), - ::Balance::from(0u32).encode() + ::Balance::from(0u32).encode(), + vec![], )); + let addr_bob = Contracts::contract_address(&ALICE, &set_rent_code_hash, &[]); // Check if `BOB` was created successfully and that the rent allowance is // set to 0. - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); + let bob_contract = ContractInfoOf::::get(&addr_bob).unwrap().get_alive().unwrap(); assert_eq!(bob_contract.rent_allowance, 0); if test_different_storage { assert_ok!(Contracts::call( Origin::signed(ALICE), - BOB, 0, GAS_LIMIT, + addr_bob.clone(), 0, GAS_LIMIT, call::set_storage_4_byte()) ); } @@ -1179,15 +1195,17 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: // Call `BOB`, which makes it pay rent. Since the rent allowance is set to 0 // we expect that it will get removed leaving tombstone. assert_err_ignore_postinfo!( - Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null()), + Contracts::call( + Origin::signed(ALICE), addr_bob.clone(), 0, GAS_LIMIT, call::null() + ), Error::::NotCallable ); - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); + assert!(ContractInfoOf::::get(&addr_bob).unwrap().get_tombstone().is_some()); assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, event: MetaEvent::contracts( - RawEvent::Evicted(BOB.clone(), true) + RawEvent::Evicted(addr_bob.clone(), true) ), topics: vec![], }, @@ -1203,13 +1221,16 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: 30_000, GAS_LIMIT, restoration_code_hash.into(), - ::Balance::from(0u32).encode() + ::Balance::from(0u32).encode(), + vec![], )); + let addr_django = Contracts::contract_address(&CHARLIE, &restoration_code_hash, &[]); // Before performing a call to `DJANGO` save its original trie id. - let django_trie_id = ContractInfoOf::::get(DJANGO).unwrap() + let django_trie_id = ContractInfoOf::::get(&addr_django).unwrap() .get_alive().unwrap().trie_id; + // The trie is regarded as 'dirty' when it was written to in the current block. if !test_restore_to_with_dirty_storage { // Advance 1 block, to the 6th. initialize_block(6); @@ -1220,37 +1241,43 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: let perform_the_restoration = || { Contracts::call( Origin::signed(ALICE), - DJANGO, + addr_django.clone(), 0, GAS_LIMIT, - set_rent_code_hash.as_ref().to_vec(), + set_rent_code_hash + .as_ref() + .iter() + .chain(AsRef::<[u8]>::as_ref(&addr_bob)) + .cloned() + .collect(), ) }; if test_different_storage || test_restore_to_with_dirty_storage { // Parametrization of the test imply restoration failure. Check that `DJANGO` aka // restoration contract is still in place and also that `BOB` doesn't exist. 
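// The closure just above now hands the restoration contract the tombstoned
// destination's code hash followed by the raw bytes of the destination
// address (`addr_bob`), instead of the bare code hash. Built in isolation,
// with the 32-byte sizes assuming `BlakeTwo256` hashes and `AccountId32`:
fn restoration_input(dest_code_hash: &[u8; 32], dest_addr: &[u8; 32]) -> Vec<u8> {
    dest_code_hash.iter().chain(dest_addr.iter()).copied().collect()
}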
- - assert_err_ignore_postinfo!( - perform_the_restoration(), - Error::::ContractTrapped, - ); - - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - let django_contract = ContractInfoOf::::get(DJANGO).unwrap() + let result = perform_the_restoration(); + assert!(ContractInfoOf::::get(&addr_bob).unwrap().get_tombstone().is_some()); + let django_contract = ContractInfoOf::::get(&addr_django).unwrap() .get_alive().unwrap(); assert_eq!(django_contract.storage_size, 8); assert_eq!(django_contract.trie_id, django_trie_id); assert_eq!(django_contract.deduct_block, System::block_number()); match (test_different_storage, test_restore_to_with_dirty_storage) { (true, false) => { + assert_err_ignore_postinfo!( + result, Error::::InvalidTombstone, + ); assert_eq!(System::events(), vec![]); } (_, true) => { + assert_err_ignore_postinfo!( + result, Error::::InvalidContractOrigin, + ); pretty_assertions::assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Evicted(BOB, true)), + event: MetaEvent::contracts(RawEvent::Evicted(addr_bob, true)), topics: vec![], }, EventRecord { @@ -1265,24 +1292,24 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(DJANGO)), + event: MetaEvent::system(frame_system::RawEvent::NewAccount(addr_django.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(DJANGO, 30_000)), + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(addr_django.clone(), 30_000)), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: MetaEvent::balances( - pallet_balances::RawEvent::Transfer(CHARLIE, DJANGO, 30_000) + pallet_balances::RawEvent::Transfer(CHARLIE, addr_django.clone(), 30_000) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Instantiated(CHARLIE, DJANGO)), + event: MetaEvent::contracts(RawEvent::Instantiated(CHARLIE, addr_django.clone())), topics: vec![], }, ]); @@ -1294,24 +1321,24 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: // Here we expect that the restoration is succeeded. Check that the restoration // contract `DJANGO` ceased to exist and that `BOB` returned back. 
- println!("{:?}", ContractInfoOf::::get(BOB)); - let bob_contract = ContractInfoOf::::get(BOB).unwrap() + println!("{:?}", ContractInfoOf::::get(&addr_bob)); + let bob_contract = ContractInfoOf::::get(&addr_bob).unwrap() .get_alive().unwrap(); assert_eq!(bob_contract.rent_allowance, 50); assert_eq!(bob_contract.storage_size, 4); assert_eq!(bob_contract.trie_id, django_trie_id); assert_eq!(bob_contract.deduct_block, System::block_number()); - assert!(ContractInfoOf::::get(DJANGO).is_none()); + assert!(ContractInfoOf::::get(&addr_django).is_none()); assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: MetaEvent::system(system::RawEvent::KilledAccount(DJANGO)), + event: MetaEvent::system(system::RawEvent::KilledAccount(addr_django.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: MetaEvent::contracts( - RawEvent::Restored(DJANGO, BOB, bob_contract.code_hash, 50) + RawEvent::Restored(addr_django, addr_bob, bob_contract.code_hash, 50) ), topics: vec![], }, @@ -1337,29 +1364,31 @@ fn storage_max_value_limit() { GAS_LIMIT, code_hash.into(), vec![], + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check creation - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); + let bob_contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); assert_eq!(bob_contract.rent_allowance, >::max_value()); // Call contract with allowed storage value. assert_ok!(Contracts::call( Origin::signed(ALICE), - BOB, + addr.clone(), 0, GAS_LIMIT * 2, // we are copying a huge buffer - ::MaxValueSize::get().encode(), + ::MaxValueSize::get().encode(), )); // Call contract with too large a storage value. assert_err_ignore_postinfo!( Contracts::call( Origin::signed(ALICE), - BOB, + addr, 0, GAS_LIMIT, - (::MaxValueSize::get() + 1).encode(), + (::MaxValueSize::get() + 1).encode(), ), Error::::ValueTooLarge, ); @@ -1386,13 +1415,15 @@ fn deploy_and_call_other_contract() { GAS_LIMIT, caller_code_hash.into(), vec![], + vec![], )); + let addr = Contracts::contract_address(&ALICE, &caller_code_hash, &[]); // Call BOB contract, which attempts to instantiate and call the callee contract and // makes various assertions on the results from those calls. assert_ok!(Contracts::call( Origin::signed(ALICE), - BOB, + addr, 0, GAS_LIMIT, callee_code_hash.as_ref().to_vec(), @@ -1417,11 +1448,13 @@ fn cannot_self_destruct_through_draning() { GAS_LIMIT, code_hash.into(), vec![], + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. assert_matches!( - ContractInfoOf::::get(BOB), + ContractInfoOf::::get(&addr), Some(ContractInfo::Alive(_)) ); @@ -1430,7 +1463,7 @@ fn cannot_self_destruct_through_draning() { assert_ok!( Contracts::call( Origin::signed(ALICE), - BOB, + addr, 0, GAS_LIMIT, vec![], @@ -1456,11 +1489,13 @@ fn cannot_self_destruct_while_live() { GAS_LIMIT, code_hash.into(), vec![], + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. assert_matches!( - ContractInfoOf::::get(BOB), + ContractInfoOf::::get(&addr), Some(ContractInfo::Alive(_)) ); @@ -1469,7 +1504,7 @@ fn cannot_self_destruct_while_live() { assert_err_ignore_postinfo!( Contracts::call( Origin::signed(ALICE), - BOB, + addr.clone(), 0, GAS_LIMIT, vec![0], @@ -1479,7 +1514,7 @@ fn cannot_self_destruct_while_live() { // Check that BOB is still alive. 
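// Throughout these tests the fixed `BOB`/`CHARLIE` destinations are replaced
// by `Contracts::contract_address(&deployer, &code_hash, &salt)`. The actual
// derivation is defined outside this hunk; a plausible shape, mirroring
// `generate_trie_id` earlier in the patch, hashes the concatenation of the
// three inputs (blake2-256 and this ordering are assumptions, not taken from
// the diff):
use sp_core::crypto::AccountId32;
use sp_io::hashing::blake2_256;

fn derived_address(deployer: &AccountId32, code_hash: &[u8; 32], salt: &[u8]) -> AccountId32 {
    let buf: Vec<u8> = AsRef::<[u8]>::as_ref(deployer)
        .iter()
        .chain(code_hash.iter())
        .chain(salt.iter())
        .copied()
        .collect();
    AccountId32::new(blake2_256(&buf))
}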
assert_matches!( - ContractInfoOf::::get(BOB), + ContractInfoOf::::get(&addr), Some(ContractInfo::Alive(_)) ); }); @@ -1502,11 +1537,13 @@ fn self_destruct_works() { GAS_LIMIT, code_hash.into(), vec![], + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. assert_matches!( - ContractInfoOf::::get(BOB), + ContractInfoOf::::get(&addr), Some(ContractInfo::Alive(_)) ); @@ -1514,7 +1551,7 @@ fn self_destruct_works() { assert_matches!( Contracts::call( Origin::signed(ALICE), - BOB, + addr.clone(), 0, GAS_LIMIT, vec![], @@ -1523,7 +1560,7 @@ fn self_destruct_works() { ); // Check that account is gone - assert!(ContractInfoOf::::get(BOB).is_none()); + assert!(ContractInfoOf::::get(&addr).is_none()); // check that the beneficiary (django) got remaining balance assert_eq!(Balances::free_balance(DJANGO), 100_000); @@ -1554,25 +1591,30 @@ fn destroy_contract_and_transfer_funds() { GAS_LIMIT, caller_code_hash.into(), callee_code_hash.as_ref().to_vec(), + vec![], )); + let addr_bob = Contracts::contract_address(&ALICE, &caller_code_hash, &[]); + let addr_charlie = Contracts::contract_address( + &addr_bob, &callee_code_hash, &[0x47, 0x11] + ); // Check that the CHARLIE contract has been instantiated. assert_matches!( - ContractInfoOf::::get(CHARLIE), + ContractInfoOf::::get(&addr_charlie), Some(ContractInfo::Alive(_)) ); // Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct. assert_ok!(Contracts::call( Origin::signed(ALICE), - BOB, + addr_bob, 0, GAS_LIMIT, - CHARLIE.encode(), + addr_charlie.encode(), )); // Check that CHARLIE has moved on to the great beyond (ie. died). - assert!(ContractInfoOf::::get(CHARLIE).is_none()); + assert!(ContractInfoOf::::get(&addr_charlie).is_none()); }); } @@ -1594,6 +1636,7 @@ fn cannot_self_destruct_in_constructor() { GAS_LIMIT, code_hash.into(), vec![], + vec![], ), Error::::NewContractNotFunded, ); @@ -1618,7 +1661,9 @@ fn crypto_hashes() { GAS_LIMIT, code_hash.into(), vec![], + vec![], )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Perform the call. let input = b"_DEAD_BEEF"; use sp_io::hashing::*; @@ -1642,7 +1687,7 @@ fn crypto_hashes() { params.extend_from_slice(input); let result = >::bare_call( ALICE, - BOB, + addr.clone(), 0, GAS_LIMIT, params, @@ -1658,7 +1703,7 @@ fn crypto_hashes() { fn transfer_return_code() { let (wasm, code_hash) = compile_module::("transfer_return_code").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Config::::subsistence_threshold_uncached(); + let subsistence = ConfigCache::::subsistence_threshold_uncached(); let _ = Balances::deposit_creating(&ALICE, 10 * subsistence); assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); @@ -1669,13 +1714,15 @@ fn transfer_return_code() { GAS_LIMIT, code_hash.into(), vec![], + vec![], ), ); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Contract has only the minimal balance so any transfer will return BelowSubsistence. let result = Contracts::bare_call( ALICE, - BOB, + addr.clone(), 0, GAS_LIMIT, vec![], @@ -1685,11 +1732,11 @@ fn transfer_return_code() { // Contract has enough total balance in order to not go below the subsistence // threshold when transfering 100 balance but this balance is reserved so // the transfer still fails but with another return code. 
- Balances::make_free_balance_be(&BOB, subsistence + 100); - Balances::reserve(&BOB, subsistence + 100).unwrap(); + Balances::make_free_balance_be(&addr, subsistence + 100); + Balances::reserve(&addr, subsistence + 100).unwrap(); let result = Contracts::bare_call( ALICE, - BOB, + addr, 0, GAS_LIMIT, vec![], @@ -1703,7 +1750,7 @@ fn call_return_code() { let (caller_code, caller_hash) = compile_module::("call_return_code").unwrap(); let (callee_code, callee_hash) = compile_module::("ok_trap_revert").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Config::::subsistence_threshold_uncached(); + let subsistence = ConfigCache::::subsistence_threshold_uncached(); let _ = Balances::deposit_creating(&ALICE, 10 * subsistence); let _ = Balances::deposit_creating(&CHARLIE, 10 * subsistence); assert_ok!(Contracts::put_code(Origin::signed(ALICE), caller_code)); @@ -1716,16 +1763,18 @@ fn call_return_code() { GAS_LIMIT, caller_hash.into(), vec![0], + vec![], ), ); + let addr_bob = Contracts::contract_address(&ALICE, &caller_hash, &[]); // Contract calls into Django which is no valid contract let result = Contracts::bare_call( ALICE, - BOB, + addr_bob.clone(), 0, GAS_LIMIT, - vec![0], + AsRef::<[u8]>::as_ref(&DJANGO).to_vec(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::NotCallable); @@ -1736,51 +1785,53 @@ fn call_return_code() { GAS_LIMIT, callee_hash.into(), vec![0], + vec![], ), ); + let addr_django = Contracts::contract_address(&CHARLIE, &callee_hash, &[]); // Contract has only the minimal balance so any transfer will return BelowSubsistence. let result = Contracts::bare_call( ALICE, - BOB, + addr_bob.clone(), 0, GAS_LIMIT, - vec![0], + AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&0u32.to_le_bytes()).cloned().collect(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::BelowSubsistenceThreshold); // Contract has enough total balance in order to not go below the subsistence // threshold when transfering 100 balance but this balance is reserved so // the transfer still fails but with another return code. - Balances::make_free_balance_be(&BOB, subsistence + 100); - Balances::reserve(&BOB, subsistence + 100).unwrap(); + Balances::make_free_balance_be(&addr_bob, subsistence + 100); + Balances::reserve(&addr_bob, subsistence + 100).unwrap(); let result = Contracts::bare_call( ALICE, - BOB, + addr_bob.clone(), 0, GAS_LIMIT, - vec![0], + AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&0u32.to_le_bytes()).cloned().collect(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but callee reverts because "1" is passed. - Balances::make_free_balance_be(&BOB, subsistence + 1000); + Balances::make_free_balance_be(&addr_bob, subsistence + 1000); let result = Contracts::bare_call( ALICE, - BOB, + addr_bob.clone(), 0, GAS_LIMIT, - vec![1], + AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&1u32.to_le_bytes()).cloned().collect(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeReverted); // Contract has enough balance but callee traps because "2" is passed. 
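// The `call_return_code` inputs that exercise the return codes are now the
// callee address followed by a 4-byte little-endian flag (0 = plain transfer,
// 1 = revert, 2 = trap, per the comments in this hunk), rather than a single
// byte. Assembled on its own, assuming a 32-byte `AccountId32`:
fn call_input(callee: &[u8; 32], flag: u32) -> Vec<u8> {
    callee.iter().chain(&flag.to_le_bytes()).copied().collect()
}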
let result = Contracts::bare_call( ALICE, - BOB, + addr_bob, 0, GAS_LIMIT, - vec![2], + AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&2u32.to_le_bytes()).cloned().collect(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); @@ -1792,7 +1843,7 @@ fn instantiate_return_code() { let (caller_code, caller_hash) = compile_module::("instantiate_return_code").unwrap(); let (callee_code, callee_hash) = compile_module::("ok_trap_revert").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Config::::subsistence_threshold_uncached(); + let subsistence = ConfigCache::::subsistence_threshold_uncached(); let _ = Balances::deposit_creating(&ALICE, 10 * subsistence); let _ = Balances::deposit_creating(&CHARLIE, 10 * subsistence); assert_ok!(Contracts::put_code(Origin::signed(ALICE), caller_code)); @@ -1806,13 +1857,15 @@ fn instantiate_return_code() { GAS_LIMIT, caller_hash.into(), vec![], + vec![], ), ); + let addr = Contracts::contract_address(&ALICE, &caller_hash, &[]); // Contract has only the minimal balance so any transfer will return BelowSubsistence. let result = Contracts::bare_call( ALICE, - BOB, + addr.clone(), 0, GAS_LIMIT, vec![0; 33], @@ -1822,11 +1875,11 @@ fn instantiate_return_code() { // Contract has enough total balance in order to not go below the subsistence // threshold when transfering 100 balance but this balance is reserved so // the transfer still fails but with another return code. - Balances::make_free_balance_be(&BOB, subsistence + 100); - Balances::reserve(&BOB, subsistence + 100).unwrap(); + Balances::make_free_balance_be(&addr, subsistence + 100); + Balances::reserve(&addr, subsistence + 100).unwrap(); let result = Contracts::bare_call( ALICE, - BOB, + addr.clone(), 0, GAS_LIMIT, vec![0; 33], @@ -1834,10 +1887,10 @@ fn instantiate_return_code() { assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but the passed code hash is invalid - Balances::make_free_balance_be(&BOB, subsistence + 1000); + Balances::make_free_balance_be(&addr, subsistence + 1000); let result = Contracts::bare_call( ALICE, - BOB, + addr.clone(), 0, GAS_LIMIT, vec![0; 33], @@ -1847,20 +1900,20 @@ fn instantiate_return_code() { // Contract has enough balance but callee reverts because "1" is passed. let result = Contracts::bare_call( ALICE, - BOB, + addr.clone(), 0, GAS_LIMIT, - callee_hash.iter().cloned().chain(sp_std::iter::once(1)).collect(), + callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeReverted); // Contract has enough balance but callee traps because "2" is passed. let result = Contracts::bare_call( ALICE, - BOB, + addr, 0, GAS_LIMIT, - callee_hash.iter().cloned().chain(sp_std::iter::once(2)).collect(), + callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect(), ).exec_result.unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 34b8ea74435384840e327d87cb09158e9a75f210..d90c7502b85e699b6979cf116254cbdb1e75ffcb 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -27,19 +27,20 @@ //! Thus, before executing a contract it should be reinstrument with new schedule. 
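// A sketch of the versioning rule the module doc above describes: a cached
// module remembers which schedule version instrumented it, and loading
// re-instruments (and refreshes the cache) whenever the current schedule is
// newer. The struct and closure below are simplified stand-ins for
// `PrefabWasmModule` and the real instrumentation pass.
struct CachedModule {
    schedule_version: u32,
    code: Vec<u8>,
}

fn load_cached(
    cached: &mut CachedModule,
    current_version: u32,
    reinstrument: impl Fn(&[u8]) -> Vec<u8>,
) -> &Vec<u8> {
    if cached.schedule_version < current_version {
        // Re-run instrumentation against the newer schedule and update the cache.
        cached.code = reinstrument(&cached.code);
        cached.schedule_version = current_version;
    }
    &cached.code
}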
use crate::wasm::{prepare, runtime::Env, PrefabWasmModule}; -use crate::{CodeHash, CodeStorage, PristineCode, Schedule, Trait}; +use crate::{CodeHash, CodeStorage, PristineCode, Schedule, Config}; use sp_std::prelude::*; use sp_runtime::traits::Hash; +use sp_core::crypto::UncheckedFrom; use frame_support::StorageMap; /// Put code in the storage. The hash of code is used as a key and is returned /// as a result of this function. /// /// This function instruments the given code and caches it in the storage. -pub fn save( +pub fn save( original_code: Vec, schedule: &Schedule, -) -> Result, &'static str> { +) -> Result, &'static str> where T::AccountId: UncheckedFrom + AsRef<[u8]> { let prefab_module = prepare::prepare_contract::(&original_code, schedule)?; let code_hash = T::Hashing::hash(&original_code); @@ -54,10 +55,10 @@ pub fn save( /// This version neither checks nor instruments the passed in code. This is useful /// when code needs to be benchmarked without the injected instrumentation. #[cfg(feature = "runtime-benchmarks")] -pub fn save_raw( +pub fn save_raw( original_code: Vec, schedule: &Schedule, -) -> Result, &'static str> { +) -> Result, &'static str> where T::AccountId: UncheckedFrom + AsRef<[u8]> { let prefab_module = prepare::benchmarking::prepare_contract::(&original_code, schedule)?; let code_hash = T::Hashing::hash(&original_code); @@ -72,10 +73,10 @@ pub fn save_raw( /// If the module was instrumented with a lower version of schedule than /// the current one given as an argument, then this function will perform /// re-instrumentation and update the cache in the storage. -pub fn load( +pub fn load( code_hash: &CodeHash, schedule: &Schedule, -) -> Result { +) -> Result where T::AccountId: UncheckedFrom + AsRef<[u8]> { let mut prefab_module = >::get(code_hash).ok_or_else(|| "code is not found")?; diff --git a/frame/contracts/src/wasm/env_def/macros.rs b/frame/contracts/src/wasm/env_def/macros.rs index 2538f85fb73851beb1a07c8b2ccdda0a538439dd..cc61deb074b756a9669f31e22ae7556b8677e7d5 100644 --- a/frame/contracts/src/wasm/env_def/macros.rs +++ b/frame/contracts/src/wasm/env_def/macros.rs @@ -127,7 +127,12 @@ macro_rules! define_func { fn $name< E: $seal_ty >( $ctx: &mut $crate::wasm::Runtime, args: &[sp_sandbox::Value], - ) -> Result { + ) -> Result + where + ::AccountId: + sp_core::crypto::UncheckedFrom<::Hash> + + AsRef<[u8]> + { #[allow(unused)] let mut args = args.iter(); @@ -183,7 +188,12 @@ macro_rules! define_env { } } - impl $crate::wasm::env_def::FunctionImplProvider for $init_name { + impl $crate::wasm::env_def::FunctionImplProvider for $init_name + where + ::AccountId: + sp_core::crypto::UncheckedFrom<::Hash> + + AsRef<[u8]> + { fn impls)>(f: &mut F) { register_func!(f, < E: $seal_ty > ; $( $name ( $ctx $( , $names : $params )* ) $( -> $returns)* => $body )* ); } diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index f90f3af688d96d2eceb7f54b3c1b3daae7cf6680..7d7668d5ec6d227dc6a712f2ee175838dae3a22b 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -17,12 +17,13 @@ //! This module provides a means for executing contracts //! represented in wasm. 
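// The `where` clauses added in `define_func!`/`define_env!` above, and on
// `WasmLoader`/`WasmVm` just below, all state the same requirement: the
// runtime's account id must be constructible from its hash type (via
// `UncheckedFrom`) and viewable as raw bytes (`AsRef<[u8]>`), which is what the
// salt-based address and trie-seed derivations rely on. In miniature, with
// `From` standing in for `UncheckedFrom` and toy associated types:
trait RuntimeLike {
    type Hash: AsRef<[u8]>;
    type AccountId: From<Self::Hash> + AsRef<[u8]>;
}

fn account_bytes_len<T: RuntimeLike>(hash: T::Hash) -> usize {
    // Build an account id from a hash, then use the byte view the second
    // bound provides.
    let account = T::AccountId::from(hash);
    account.as_ref().len()
}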
-use crate::{CodeHash, Schedule, Trait}; +use crate::{CodeHash, Schedule, Config}; use crate::wasm::env_def::FunctionImplProvider; use crate::exec::Ext; use crate::gas::GasMeter; use sp_std::prelude::*; +use sp_core::crypto::UncheckedFrom; use codec::{Encode, Decode}; use sp_sandbox; @@ -32,7 +33,7 @@ mod code_cache; mod prepare; mod runtime; -use self::runtime::{to_execution_result, Runtime}; +use self::runtime::Runtime; use self::code_cache::load as load_code; use pallet_contracts_primitives::ExecResult; @@ -67,17 +68,20 @@ pub struct WasmExecutable { } /// Loader which fetches `WasmExecutable` from the code cache. -pub struct WasmLoader<'a, T: Trait> { +pub struct WasmLoader<'a, T: Config> { schedule: &'a Schedule, } -impl<'a, T: Trait> WasmLoader<'a, T> { +impl<'a, T: Config> WasmLoader<'a, T> where T::AccountId: UncheckedFrom + AsRef<[u8]> { pub fn new(schedule: &'a Schedule) -> Self { WasmLoader { schedule } } } -impl<'a, T: Trait> crate::exec::Loader for WasmLoader<'a, T> { +impl<'a, T: Config> crate::exec::Loader for WasmLoader<'a, T> +where + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ type Executable = WasmExecutable; fn load_init(&self, code_hash: &CodeHash) -> Result { @@ -97,17 +101,20 @@ impl<'a, T: Trait> crate::exec::Loader for WasmLoader<'a, T> { } /// Implementation of `Vm` that takes `WasmExecutable` and executes it. -pub struct WasmVm<'a, T: Trait> { +pub struct WasmVm<'a, T: Config> where T::AccountId: UncheckedFrom + AsRef<[u8]> { schedule: &'a Schedule, } -impl<'a, T: Trait> WasmVm<'a, T> { +impl<'a, T: Config> WasmVm<'a, T> where T::AccountId: UncheckedFrom + AsRef<[u8]> { pub fn new(schedule: &'a Schedule) -> Self { WasmVm { schedule } } } -impl<'a, T: Trait> crate::exec::Vm for WasmVm<'a, T> { +impl<'a, T: Config> crate::exec::Vm for WasmVm<'a, T> +where + T::AccountId: UncheckedFrom + AsRef<[u8]> +{ type Executable = WasmExecutable; fn execute>( @@ -147,20 +154,22 @@ impl<'a, T: Trait> crate::exec::Vm for WasmVm<'a, T> { // entrypoint. 
let result = sp_sandbox::Instance::new(&exec.prefab_module.code, &imports, &mut runtime) .and_then(|mut instance| instance.invoke(exec.entrypoint_name, &[], &mut runtime)); - to_execution_result(runtime, result) + runtime.to_execution_result(result) } } #[cfg(test)] mod tests { use super::*; + use crate::{ + CodeHash, BalanceOf, Error, Module as Contracts, + exec::{Ext, StorageKey, AccountIdOf}, + gas::{Gas, GasMeter}, + tests::{Test, Call, ALICE, BOB}, + wasm::prepare::prepare_contract, + }; use std::collections::HashMap; use sp_core::H256; - use crate::exec::{Ext, StorageKey}; - use crate::gas::{Gas, GasMeter}; - use crate::tests::{Test, Call}; - use crate::wasm::prepare::prepare_contract; - use crate::{CodeHash, BalanceOf, Error}; use hex_literal::hex; use sp_runtime::DispatchError; use frame_support::weights::Weight; @@ -174,7 +183,7 @@ mod tests { #[derive(Debug, PartialEq, Eq)] struct RestoreEntry { - dest: u64, + dest: AccountIdOf, code_hash: H256, rent_allowance: u64, delta: Vec, @@ -186,16 +195,17 @@ mod tests { endowment: u64, data: Vec, gas_left: u64, + salt: Vec, } #[derive(Debug, PartialEq, Eq)] struct TerminationEntry { - beneficiary: u64, + beneficiary: AccountIdOf, } #[derive(Debug, PartialEq, Eq)] struct TransferEntry { - to: u64, + to: AccountIdOf, value: u64, data: Vec, } @@ -210,7 +220,6 @@ mod tests { restores: Vec, // (topics, data) events: Vec<(Vec, Vec)>, - next_account_id: u64, } impl Ext for MockExt { @@ -228,18 +237,17 @@ mod tests { endowment: u64, gas_meter: &mut GasMeter, data: Vec, - ) -> Result<(u64, ExecReturnValue), ExecError> { + salt: &[u8], + ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { self.instantiates.push(InstantiateEntry { code_hash: code_hash.clone(), endowment, data: data.to_vec(), gas_left: gas_meter.gas_left(), + salt: salt.to_vec(), }); - let address = self.next_account_id; - self.next_account_id += 1; - Ok(( - address, + Contracts::::contract_address(&ALICE, code_hash, salt), ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new(), @@ -248,11 +256,11 @@ mod tests { } fn transfer( &mut self, - to: &u64, + to: &AccountIdOf, value: u64, ) -> Result<(), DispatchError> { self.transfers.push(TransferEntry { - to: *to, + to: to.clone(), value, data: Vec::new(), }); @@ -260,13 +268,13 @@ mod tests { } fn call( &mut self, - to: &u64, + to: &AccountIdOf, value: u64, _gas_meter: &mut GasMeter, data: Vec, ) -> ExecResult { self.transfers.push(TransferEntry { - to: *to, + to: to.clone(), value, data: data, }); @@ -276,20 +284,20 @@ mod tests { } fn terminate( &mut self, - beneficiary: &u64, + beneficiary: &AccountIdOf, ) -> Result<(), DispatchError> { self.terminations.push(TerminationEntry { - beneficiary: *beneficiary, + beneficiary: beneficiary.clone(), }); Ok(()) } fn restore_to( &mut self, - dest: u64, + dest: AccountIdOf, code_hash: H256, rent_allowance: u64, delta: Vec, - ) -> Result<(), &'static str> { + ) -> Result<(), DispatchError> { self.restores.push(RestoreEntry { dest, code_hash, @@ -298,11 +306,11 @@ mod tests { }); Ok(()) } - fn caller(&self) -> &u64 { - &42 + fn caller(&self) -> &AccountIdOf { + &ALICE } - fn address(&self) -> &u64 { - &69 + fn address(&self) -> &AccountIdOf { + &BOB } fn balance(&self) -> u64 { 228 @@ -363,25 +371,26 @@ mod tests { value: u64, gas_meter: &mut GasMeter, input_data: Vec, - ) -> Result<(u64, ExecReturnValue), ExecError> { - (**self).instantiate(code, value, gas_meter, input_data) + salt: &[u8], + ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { + (**self).instantiate(code, 
value, gas_meter, input_data, salt) } fn transfer( &mut self, - to: &u64, + to: &AccountIdOf, value: u64, ) -> Result<(), DispatchError> { (**self).transfer(to, value) } fn terminate( &mut self, - beneficiary: &u64, + beneficiary: &AccountIdOf, ) -> Result<(), DispatchError> { (**self).terminate(beneficiary) } fn call( &mut self, - to: &u64, + to: &AccountIdOf, value: u64, gas_meter: &mut GasMeter, input_data: Vec, @@ -390,11 +399,11 @@ mod tests { } fn restore_to( &mut self, - dest: u64, + dest: AccountIdOf, code_hash: H256, rent_allowance: u64, delta: Vec, - ) -> Result<(), &'static str> { + ) -> Result<(), DispatchError> { (**self).restore_to( dest, code_hash, @@ -402,10 +411,10 @@ mod tests { delta, ) } - fn caller(&self) -> &u64 { + fn caller(&self) -> &AccountIdOf { (**self).caller() } - fn address(&self) -> &u64 { + fn address(&self) -> &AccountIdOf { (**self).address() } fn balance(&self) -> u64 { @@ -451,7 +460,11 @@ mod tests { input_data: Vec, ext: E, gas_meter: &mut GasMeter, - ) -> ExecResult { + ) -> ExecResult + where + ::AccountId: + UncheckedFrom<::Hash> + AsRef<[u8]> + { use crate::exec::Vm; let wasm = wat::parse_str(wat).unwrap(); @@ -485,21 +498,23 @@ mod tests { (drop (call $seal_transfer (i32.const 4) ;; Pointer to "account" address. - (i32.const 8) ;; Length of "account" address. - (i32.const 12) ;; Pointer to the buffer with value to transfer + (i32.const 32) ;; Length of "account" address. + (i32.const 36) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. ) ) ) (func (export "deploy")) - ;; Destination AccountId to transfer the funds. - ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 4) "\07\00\00\00\00\00\00\00") + ;; Destination AccountId (ALICE) + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) ;; Amount of value to transfer. ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 12) "\99\00\00\00\00\00\00\00") + (data (i32.const 36) "\99\00\00\00\00\00\00\00") ) "#; @@ -516,7 +531,7 @@ mod tests { assert_eq!( &mock_ext.transfers, &[TransferEntry { - to: 7, + to: ALICE, value: 153, data: Vec::new(), }] @@ -542,11 +557,11 @@ mod tests { (drop (call $seal_call (i32.const 4) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 12) ;; Pointer to the buffer with value to transfer + (i32.const 36) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 20) ;; Pointer to input data buffer address + (i32.const 44) ;; Pointer to input data buffer address (i32.const 4) ;; Length of input data buffer (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case @@ -555,14 +570,17 @@ mod tests { ) (func (export "deploy")) - ;; Destination AccountId to transfer the funds. - ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 4) "\09\00\00\00\00\00\00\00") + ;; Destination AccountId (ALICE) + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) + ;; Amount of value to transfer. ;; Represented by u64 (8 bytes long) in little endian. 
- (data (i32.const 12) "\06\00\00\00\00\00\00\00") + (data (i32.const 36) "\06\00\00\00\00\00\00\00") - (data (i32.const 20) "\01\02\03\04") + (data (i32.const 44) "\01\02\03\04") ) "#; @@ -579,7 +597,7 @@ mod tests { assert_eq!( &mock_ext.transfers, &[TransferEntry { - to: 9, + to: ALICE, value: 6, data: vec![1, 2, 3, 4], }] @@ -602,7 +620,9 @@ mod tests { ;; output_ptr: u32, ;; output_len_ptr: u32 ;; ) -> u32 - (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_instantiate" (func $seal_instantiate + (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) + )) (import "env" "memory" (memory 1 1)) (func (export "call") (drop @@ -618,11 +638,15 @@ mod tests { (i32.const 0) ;; Length is ignored in this case (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 4) ;; salt_len ) ) ) (func (export "deploy")) + ;; Salt + (data (i32.const 0) "\42\43\44\45") ;; Amount of value to transfer. ;; Represented by u64 (8 bytes long) in little endian. (data (i32.const 4) "\03\00\00\00\00\00\00\00") @@ -653,7 +677,11 @@ mod tests { endowment: 3, data, gas_left: _, - }] if code_hash == &[0x11; 32].into() && data == &vec![1, 2, 3, 4] + salt, + }] if + code_hash == &[0x11; 32].into() && + data == &vec![1, 2, 3, 4] && + salt == &vec![0x42, 0x43, 0x44, 0x45] ); } @@ -668,14 +696,16 @@ mod tests { (func (export "call") (call $seal_terminate (i32.const 4) ;; Pointer to "beneficiary" address. - (i32.const 8) ;; Length of "beneficiary" address. + (i32.const 32) ;; Length of "beneficiary" address. ) ) (func (export "deploy")) ;; Beneficiary AccountId to transfer the funds. - ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 4) "\09\00\00\00\00\00\00\00") + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) ) "#; @@ -692,7 +722,7 @@ mod tests { assert_eq!( &mock_ext.terminations, &[TerminationEntry { - beneficiary: 0x09, + beneficiary: ALICE, }] ); } @@ -716,11 +746,11 @@ mod tests { (drop (call $seal_call (i32.const 4) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 228) ;; How much gas to devote for the execution. - (i32.const 12) ;; Pointer to the buffer with value to transfer + (i32.const 36) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 20) ;; Pointer to input data buffer address + (i32.const 44) ;; Pointer to input data buffer address (i32.const 4) ;; Length of input data buffer (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this cas @@ -730,13 +760,15 @@ mod tests { (func (export "deploy")) ;; Destination AccountId to transfer the funds. - ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 4) "\09\00\00\00\00\00\00\00") + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) ;; Amount of value to transfer. ;; Represented by u64 (8 bytes long) in little endian. 
- (data (i32.const 12) "\06\00\00\00\00\00\00\00") + (data (i32.const 36) "\06\00\00\00\00\00\00\00") - (data (i32.const 20) "\01\02\03\04") + (data (i32.const 44) "\01\02\03\04") ) "#; @@ -753,7 +785,7 @@ mod tests { assert_eq!( &mock_ext.transfers, &[TransferEntry { - to: 9, + to: ALICE, value: 6, data: vec![1, 2, 3, 4], }] @@ -863,19 +895,19 @@ mod tests { ;; fill the buffer with the caller. (call $seal_caller (i32.const 0) (i32.const 32)) - ;; assert len == 8 + ;; assert len == 32 (call $assert (i32.eq (i32.load (i32.const 32)) - (i32.const 8) + (i32.const 32) ) ) - ;; assert that contents of the buffer is equal to the i64 value of 42. + ;; assert that the first 64 byte are the beginning of "ALICE" (call $assert (i64.eq (i64.load (i32.const 0)) - (i64.const 42) + (i64.const 0x0101010101010101) ) ) ) @@ -916,19 +948,19 @@ mod tests { ;; fill the buffer with the self address. (call $seal_address (i32.const 0) (i32.const 32)) - ;; assert size == 8 + ;; assert size == 32 (call $assert (i32.eq (i32.load (i32.const 32)) - (i32.const 8) + (i32.const 32) ) ) - ;; assert that contents of the buffer is equal to the i64 value of 69. + ;; assert that the first 64 byte are the beginning of "BOB" (call $assert (i64.eq (i64.load (i32.const 0)) - (i64.const 69) + (i64.const 0x0202020202020202) ) ) ) diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index 73c149d025d793d3f1523ee2300f8531f3a120d4..56e21d2ee664cc2377a3c303155fd92f817a75fb 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -20,7 +20,7 @@ use crate::wasm::env_def::ImportSatisfyCheck; use crate::wasm::PrefabWasmModule; -use crate::{Schedule, Trait}; +use crate::{Schedule, Config}; use parity_wasm::elements::{self, Internal, External, MemoryType, Type, ValueType}; use pwasm_utils; @@ -34,13 +34,13 @@ pub const IMPORT_MODULE_FN: &str = "seal0"; /// compiler toolchains might not support specifying other modules than "env" for memory imports. pub const IMPORT_MODULE_MEMORY: &str = "env"; -struct ContractModule<'a, T: Trait> { +struct ContractModule<'a, T: Config> { /// A deserialized module. The module is valid (this is Guaranteed by `new` method). module: elements::Module, schedule: &'a Schedule, } -impl<'a, T: Trait> ContractModule<'a, T> { +impl<'a, T: Config> ContractModule<'a, T> { /// Creates a new instance of `ContractModule`. /// /// Returns `Err` if the `original_code` couldn't be decoded or @@ -369,7 +369,7 @@ impl<'a, T: Trait> ContractModule<'a, T> { } } -fn get_memory_limits(module: Option<&MemoryType>, schedule: &Schedule) +fn get_memory_limits(module: Option<&MemoryType>, schedule: &Schedule) -> Result<(u32, u32), &'static str> { if let Some(memory_type) = module { @@ -410,7 +410,7 @@ fn get_memory_limits(module: Option<&MemoryType>, schedule: &Schedule< /// - all imported functions from the external environment matches defined by `env` module, /// /// The preprocessing includes injecting code for gas metering and metering the height of stack. 
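// A note on the wasm/mod.rs test fixtures updated earlier in this patch: the mock account
// ids are now 32 bytes wide (the data sections spell out 0x01 repeated 32 times for ALICE
// and 0x02 repeated 32 times for BOB), so every WAT fixture passes a 32-byte address where
// it previously passed an 8-byte u64, and the `seal_caller`/`seal_address` asserts compare
// only the first 8 bytes of the returned id with a single `i64.load`. A hypothetical
// equivalent check in Rust, assuming ALICE is `[0x01; 32]`:
//
//     assert_eq!(u64::from_le_bytes([0x01u8; 8]), 0x0101010101010101);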
-pub fn prepare_contract( +pub fn prepare_contract( original_code: &[u8], schedule: &Schedule, ) -> Result { @@ -452,7 +452,7 @@ pub fn prepare_contract( #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking { use super::{ - Trait, ContractModule, PrefabWasmModule, ImportSatisfyCheck, Schedule, get_memory_limits + Config, ContractModule, PrefabWasmModule, ImportSatisfyCheck, Schedule, get_memory_limits }; use parity_wasm::elements::FunctionType; @@ -463,7 +463,7 @@ pub mod benchmarking { } /// Prepare function that neither checks nor instruments the passed in code. - pub fn prepare_contract(original_code: &[u8], schedule: &Schedule) + pub fn prepare_contract(original_code: &[u8], schedule: &Schedule) -> Result { let contract_module = ContractModule::new(original_code, schedule)?; diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index c7de93ece70ff713f9b2fac5b6286deb88895489..ac1cb1f54d56f2374613f746ec91cd4061c2fb11 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -17,7 +17,7 @@ //! Environment definition of the wasm smart-contract runtime. use crate::{ - HostFnWeights, Schedule, Trait, CodeHash, BalanceOf, Error, + HostFnWeights, Schedule, Config, CodeHash, BalanceOf, Error, exec::{Ext, StorageKey, TopicOf}, gas::{Gas, GasMeter, Token, GasMeterResult}, wasm::env_def::ConvertibleToWasm, @@ -29,6 +29,7 @@ use frame_support::dispatch::DispatchError; use sp_std::prelude::*; use codec::{Decode, Encode}; use sp_runtime::traits::SaturatedConversion; +use sp_core::crypto::UncheckedFrom; use sp_io::hashing::{ keccak_256, blake2_256, @@ -116,92 +117,6 @@ enum TrapReason { Restoration, } -/// Can only be used for one call. -pub(crate) struct Runtime<'a, E: Ext + 'a> { - ext: &'a mut E, - input_data: Option>, - schedule: &'a Schedule, - memory: sp_sandbox::Memory, - gas_meter: &'a mut GasMeter, - trap_reason: Option, -} -impl<'a, E: Ext + 'a> Runtime<'a, E> { - pub(crate) fn new( - ext: &'a mut E, - input_data: Vec, - schedule: &'a Schedule, - memory: sp_sandbox::Memory, - gas_meter: &'a mut GasMeter, - ) -> Self { - Runtime { - ext, - input_data: Some(input_data), - schedule, - memory, - gas_meter, - trap_reason: None, - } - } -} - -/// Converts the sandbox result and the runtime state into the execution outcome. -/// -/// It evaluates information stored in the `trap_reason` variable of the runtime and -/// bases the outcome on the value if this variable. Only if `trap_reason` is `None` -/// the result of the sandbox is evaluated. -pub(crate) fn to_execution_result( - runtime: Runtime, - sandbox_result: Result, -) -> ExecResult { - // If a trap reason is set we base our decision solely on that. - if let Some(trap_reason) = runtime.trap_reason { - return match trap_reason { - // The trap was the result of the execution `return` host function. - TrapReason::Return(ReturnData{ flags, data }) => { - let flags = ReturnFlags::from_bits(flags).ok_or_else(|| - "used reserved bit in return flags" - )?; - Ok(ExecReturnValue { - flags, - data, - }) - }, - TrapReason::Termination => { - Ok(ExecReturnValue { - flags: ReturnFlags::empty(), - data: Vec::new(), - }) - }, - TrapReason::Restoration => { - Ok(ExecReturnValue { - flags: ReturnFlags::empty(), - data: Vec::new(), - }) - }, - TrapReason::SupervisorError(error) => Err(error)?, - } - } - - // Check the exact type of the error. - match sandbox_result { - // No traps were generated. Proceed normally. 
- Ok(_) => { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) - } - // `Error::Module` is returned only if instantiation or linking failed (i.e. - // wasm binary tried to import a function that is not provided by the host). - // This shouldn't happen because validation process ought to reject such binaries. - // - // Because panics are really undesirable in the runtime code, we treat this as - // a trap for now. Eventually, we might want to revisit this. - Err(sp_sandbox::Error::Module) => - Err("validation error")?, - // Any other kind of a trap should result in a failure. - Err(sp_sandbox::Error::Execution) | Err(sp_sandbox::Error::OutOfBounds) => - Err(Error::::ContractTrapped)? - } -} - #[cfg_attr(test, derive(Debug, PartialEq, Eq))] #[derive(Copy, Clone)] pub enum RuntimeToken { @@ -262,10 +177,10 @@ pub enum RuntimeToken { CallSurchargeTransfer, /// Weight of output received through `seal_call` for the given size. CallCopyOut(u32), - /// Weight of calling `seal_instantiate` for the given input size without output weight. + /// Weight of calling `seal_instantiate` for the given input and salt without output weight. /// This includes the transfer as an instantiate without a value will always be below /// the existential deposit and is disregarded as corner case. - InstantiateBase(u32), + InstantiateBase{input_data_len: u32, salt_len: u32}, /// Weight of output received through `seal_instantiate` for the given size. InstantiateCopyOut(u32), /// Weight of calling `seal_hash_sha_256` for the given input size. @@ -278,7 +193,10 @@ pub enum RuntimeToken { HashBlake128(u32), } -impl Token for RuntimeToken { +impl Token for RuntimeToken +where + T::AccountId: UncheckedFrom, T::AccountId: AsRef<[u8]> +{ type Metadata = HostFnWeights; fn calculate_amount(&self, s: &Self::Metadata) -> Gas { @@ -318,8 +236,9 @@ impl Token for RuntimeToken { .saturating_add(s.call_per_input_byte.saturating_mul(len.into())), CallSurchargeTransfer => s.call_transfer_surcharge, CallCopyOut(len) => s.call_per_output_byte.saturating_mul(len.into()), - InstantiateBase(len) => s.instantiate - .saturating_add(s.instantiate_per_input_byte.saturating_mul(len.into())), + InstantiateBase{input_data_len, salt_len} => s.instantiate + .saturating_add(s.instantiate_per_input_byte.saturating_mul(input_data_len.into())) + .saturating_add(s.instantiate_per_salt_byte.saturating_mul(salt_len.into())), InstantiateCopyOut(len) => s.instantiate_per_output_byte .saturating_mul(len.into()), HashSha256(len) => s.hash_sha2_256 @@ -334,215 +253,345 @@ impl Token for RuntimeToken { } } -/// Charge the gas meter with the specified token. -/// -/// Returns `Err(HostError)` if there is not enough gas. -fn charge_gas(ctx: &mut Runtime, token: Tok) -> Result<(), sp_sandbox::HostError> -where - E: Ext, - Tok: Token>, -{ - match ctx.gas_meter.charge(&ctx.schedule.host_fn_weights, token) { - GasMeterResult::Proceed => Ok(()), - GasMeterResult::OutOfGas => { - ctx.trap_reason = Some(TrapReason::SupervisorError(Error::::OutOfGas.into())); - Err(sp_sandbox::HostError) - }, - } +/// This is only appropriate when writing out data of constant size that does not depend on user +/// input. In this case the costs for this copy was already charged as part of the token at +/// the beginning of the API entry point. +fn already_charged(_: u32) -> Option { + None } -/// Read designated chunk from the sandbox memory. -/// -/// Returns `Err` if one of the following conditions occurs: +/// Finds duplicates in a given vector. 
/// -/// - requested buffer is not within the bounds of the sandbox memory. -fn read_sandbox_memory( - ctx: &mut Runtime, - ptr: u32, - len: u32, -) -> Result, sp_sandbox::HostError> { - let mut buf = vec![0u8; len as usize]; - ctx.memory.get(ptr, buf.as_mut_slice()) - .map_err(|_| store_err(ctx, Error::::OutOfBounds))?; - Ok(buf) +/// This function has complexity of O(n log n) and no additional memory is required, although +/// the order of items is not preserved. +fn has_duplicates>(items: &mut Vec) -> bool { + // Sort the vector + items.sort_by(|a, b| { + Ord::cmp(a.as_ref(), b.as_ref()) + }); + // And then find any two consecutive equal elements. + items.windows(2).any(|w| { + match w { + &[ref a, ref b] => a == b, + _ => false, + } + }) } -/// Read designated chunk from the sandbox memory into the supplied buffer. -/// -/// Returns `Err` if one of the following conditions occurs: -/// -/// - requested buffer is not within the bounds of the sandbox memory. -fn read_sandbox_memory_into_buf( - ctx: &mut Runtime, - ptr: u32, - buf: &mut [u8], -) -> Result<(), sp_sandbox::HostError> { - ctx.memory.get(ptr, buf).map_err(|_| store_err(ctx, Error::::OutOfBounds)) +/// Can only be used for one call. +pub struct Runtime<'a, E: Ext + 'a> { + ext: &'a mut E, + input_data: Option>, + schedule: &'a Schedule, + memory: sp_sandbox::Memory, + gas_meter: &'a mut GasMeter, + trap_reason: Option, } -/// Read designated chunk from the sandbox memory and attempt to decode into the specified type. -/// -/// Returns `Err` if one of the following conditions occurs: -/// -/// - requested buffer is not within the bounds of the sandbox memory. -/// - the buffer contents cannot be decoded as the required type. -fn read_sandbox_memory_as( - ctx: &mut Runtime, - ptr: u32, - len: u32, -) -> Result { - let buf = read_sandbox_memory(ctx, ptr, len)?; - D::decode(&mut &buf[..]).map_err(|_| store_err(ctx, Error::::DecodingFailed)) -} +impl<'a, E> Runtime<'a, E> +where + E: Ext + 'a, + ::AccountId: + UncheckedFrom<::Hash> + AsRef<[u8]> +{ + pub fn new( + ext: &'a mut E, + input_data: Vec, + schedule: &'a Schedule, + memory: sp_sandbox::Memory, + gas_meter: &'a mut GasMeter, + ) -> Self { + Runtime { + ext, + input_data: Some(input_data), + schedule, + memory, + gas_meter, + trap_reason: None, + } + } -/// Write the given buffer to the designated location in the sandbox memory. -/// -/// Returns `Err` if one of the following conditions occurs: -/// -/// - designated area is not within the bounds of the sandbox memory. -fn write_sandbox_memory( - ctx: &mut Runtime, - ptr: u32, - buf: &[u8], -) -> Result<(), sp_sandbox::HostError> { - ctx.memory.set(ptr, buf).map_err(|_| store_err(ctx, Error::::OutOfBounds)) -} + /// Converts the sandbox result and the runtime state into the execution outcome. + /// + /// It evaluates information stored in the `trap_reason` variable of the runtime and + /// bases the outcome on the value if this variable. Only if `trap_reason` is `None` + /// the result of the sandbox is evaluated. + pub fn to_execution_result( + self, + sandbox_result: Result, + ) -> ExecResult { + // If a trap reason is set we base our decision solely on that. + if let Some(trap_reason) = self.trap_reason { + return match trap_reason { + // The trap was the result of the execution `return` host function. 
+ TrapReason::Return(ReturnData{ flags, data }) => { + let flags = ReturnFlags::from_bits(flags).ok_or_else(|| + "used reserved bit in return flags" + )?; + Ok(ExecReturnValue { + flags, + data, + }) + }, + TrapReason::Termination => { + Ok(ExecReturnValue { + flags: ReturnFlags::empty(), + data: Vec::new(), + }) + }, + TrapReason::Restoration => { + Ok(ExecReturnValue { + flags: ReturnFlags::empty(), + data: Vec::new(), + }) + }, + TrapReason::SupervisorError(error) => Err(error)?, + } + } -/// Write the given buffer and its length to the designated locations in sandbox memory and -/// charge gas according to the token returned by `create_token`. -// -/// `out_ptr` is the location in sandbox memory where `buf` should be written to. -/// `out_len_ptr` is an in-out location in sandbox memory. It is read to determine the -/// length of the buffer located at `out_ptr`. If that buffer is large enough the actual -/// `buf.len()` is written to this location. -/// -/// If `out_ptr` is set to the sentinel value of `u32::max_value()` and `allow_skip` is true the -/// operation is skipped and `Ok` is returned. This is supposed to help callers to make copying -/// output optional. For example to skip copying back the output buffer of an `seal_call` -/// when the caller is not interested in the result. -/// -/// `create_token` can optionally instruct this function to charge the gas meter with the token -/// it returns. `create_token` receives the variable amount of bytes that are about to be copied by -/// this function. -/// -/// In addition to the error conditions of `write_sandbox_memory` this functions returns -/// `Err` if the size of the buffer located at `out_ptr` is too small to fit `buf`. -fn write_sandbox_output( - ctx: &mut Runtime, - out_ptr: u32, - out_len_ptr: u32, - buf: &[u8], - allow_skip: bool, - create_token: impl FnOnce(u32) -> Option, -) -> Result<(), sp_sandbox::HostError> { - if allow_skip && out_ptr == u32::max_value() { - return Ok(()); + // Check the exact type of the error. + match sandbox_result { + // No traps were generated. Proceed normally. + Ok(_) => { + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) + } + // `Error::Module` is returned only if instantiation or linking failed (i.e. + // wasm binary tried to import a function that is not provided by the host). + // This shouldn't happen because validation process ought to reject such binaries. + // + // Because panics are really undesirable in the runtime code, we treat this as + // a trap for now. Eventually, we might want to revisit this. + Err(sp_sandbox::Error::Module) => + Err("validation error")?, + // Any other kind of a trap should result in a failure. + Err(sp_sandbox::Error::Execution) | Err(sp_sandbox::Error::OutOfBounds) => + Err(Error::::ContractTrapped)? + } } - let buf_len = buf.len() as u32; - let len: u32 = read_sandbox_memory_as(ctx, out_len_ptr, 4)?; + /// Charge the gas meter with the specified token. + /// + /// Returns `Err(HostError)` if there is not enough gas. + fn charge_gas(&mut self, token: Tok) -> Result<(), sp_sandbox::HostError> + where + Tok: Token>, + { + match self.gas_meter.charge(&self.schedule.host_fn_weights, token) { + GasMeterResult::Proceed => Ok(()), + GasMeterResult::OutOfGas => { + self.trap_reason = Some( + TrapReason::SupervisorError(Error::::OutOfGas.into()) + ); + Err(sp_sandbox::HostError) + }, + } + } - if len < buf_len { - Err(store_err(ctx, Error::::OutputBufferTooSmall))? + /// Read designated chunk from the sandbox memory. 
+ /// + /// Returns `Err` if one of the following conditions occurs: + /// + /// - requested buffer is not within the bounds of the sandbox memory. + fn read_sandbox_memory(&mut self, ptr: u32, len: u32) + -> Result, sp_sandbox::HostError> + { + let mut buf = vec![0u8; len as usize]; + self.memory.get(ptr, buf.as_mut_slice()) + .map_err(|_| self.store_err(Error::::OutOfBounds))?; + Ok(buf) } - if let Some(token) = create_token(buf_len) { - charge_gas(ctx, token)?; + /// Read designated chunk from the sandbox memory into the supplied buffer. + /// + /// Returns `Err` if one of the following conditions occurs: + /// + /// - requested buffer is not within the bounds of the sandbox memory. + fn read_sandbox_memory_into_buf(&mut self, ptr: u32, buf: &mut [u8]) + -> Result<(), sp_sandbox::HostError> + { + self.memory.get(ptr, buf).map_err(|_| self.store_err(Error::::OutOfBounds)) } - ctx.memory.set(out_ptr, buf).and_then(|_| { - ctx.memory.set(out_len_ptr, &buf_len.encode()) - }) - .map_err(|_| store_err(ctx, Error::::OutOfBounds))?; + /// Read designated chunk from the sandbox memory and attempt to decode into the specified type. + /// + /// Returns `Err` if one of the following conditions occurs: + /// + /// - requested buffer is not within the bounds of the sandbox memory. + /// - the buffer contents cannot be decoded as the required type. + fn read_sandbox_memory_as(&mut self, ptr: u32, len: u32) + -> Result + { + let buf = self.read_sandbox_memory(ptr, len)?; + D::decode(&mut &buf[..]).map_err(|_| self.store_err(Error::::DecodingFailed)) + } - Ok(()) -} + /// Write the given buffer to the designated location in the sandbox memory. + /// + /// Returns `Err` if one of the following conditions occurs: + /// + /// - designated area is not within the bounds of the sandbox memory. + fn write_sandbox_memory(&mut self, ptr: u32, buf: &[u8]) -> Result<(), sp_sandbox::HostError> { + self.memory.set(ptr, buf).map_err(|_| self.store_err(Error::::OutOfBounds)) + } -/// Supply to `write_sandbox_output` to indicate that the gas meter should not be charged. -/// -/// This is only appropriate when writing out data of constant size that does not depend on user -/// input. In this case the costs for this copy was already charged as part of the token at -/// the beginning of the API entry point. -fn already_charged(_: u32) -> Option { - None -} + /// Write the given buffer and its length to the designated locations in sandbox memory and + /// charge gas according to the token returned by `create_token`. + // + /// `out_ptr` is the location in sandbox memory where `buf` should be written to. + /// `out_len_ptr` is an in-out location in sandbox memory. It is read to determine the + /// length of the buffer located at `out_ptr`. If that buffer is large enough the actual + /// `buf.len()` is written to this location. + /// + /// If `out_ptr` is set to the sentinel value of `u32::max_value()` and `allow_skip` is true the + /// operation is skipped and `Ok` is returned. This is supposed to help callers to make copying + /// output optional. For example to skip copying back the output buffer of an `seal_call` + /// when the caller is not interested in the result. + /// + /// `create_token` can optionally instruct this function to charge the gas meter with the token + /// it returns. `create_token` receives the variable amount of bytes that are about to be copied by + /// this function. 
+ /// + /// In addition to the error conditions of `write_sandbox_memory` this functions returns + /// `Err` if the size of the buffer located at `out_ptr` is too small to fit `buf`. + fn write_sandbox_output( + &mut self, + out_ptr: u32, + out_len_ptr: u32, + buf: &[u8], + allow_skip: bool, + create_token: impl FnOnce(u32) -> Option, + ) -> Result<(), sp_sandbox::HostError> + { + if allow_skip && out_ptr == u32::max_value() { + return Ok(()); + } -/// Stores a DispatchError returned from an Ext function into the trap_reason. -/// -/// This allows through supervisor generated errors to the caller. -fn store_err(ctx: &mut Runtime, err: Error) -> sp_sandbox::HostError where - E: Ext, - Error: Into, -{ - ctx.trap_reason = Some(TrapReason::SupervisorError(err.into())); - sp_sandbox::HostError -} + let buf_len = buf.len() as u32; + let len: u32 = self.read_sandbox_memory_as(out_len_ptr, 4)?; -/// Fallible conversion of `DispatchError` to `ReturnCode`. -fn err_into_return_code(from: DispatchError) -> Result { - use ReturnCode::*; - - let below_sub = Error::::BelowSubsistenceThreshold.into(); - let transfer_failed = Error::::TransferFailed.into(); - let not_funded = Error::::NewContractNotFunded.into(); - let no_code = Error::::CodeNotFound.into(); - let invalid_contract = Error::::NotCallable.into(); - - match from { - x if x == below_sub => Ok(BelowSubsistenceThreshold), - x if x == transfer_failed => Ok(TransferFailed), - x if x == not_funded => Ok(NewContractNotFunded), - x if x == no_code => Ok(CodeNotFound), - x if x == invalid_contract => Ok(NotCallable), - err => Err(err) + if len < buf_len { + Err(self.store_err(Error::::OutputBufferTooSmall))? + } + + if let Some(token) = create_token(buf_len) { + self.charge_gas(token)?; + } + + self.memory.set(out_ptr, buf).and_then(|_| { + self.memory.set(out_len_ptr, &buf_len.encode()) + }) + .map_err(|_| self.store_err(Error::::OutOfBounds))?; + + Ok(()) } -} -/// Fallible conversion of a `ExecResult` to `ReturnCode`. -fn exec_into_return_code(from: ExecResult) -> Result { - use pallet_contracts_primitives::ErrorOrigin::Callee; + /// Computes the given hash function on the supplied input. + /// + /// Reads from the sandboxed input buffer into an intermediate buffer. + /// Returns the result directly to the output buffer of the sandboxed memory. + /// + /// It is the callers responsibility to provide an output buffer that + /// is large enough to hold the expected amount of bytes returned by the + /// chosen hash function. + /// + /// # Note + /// + /// The `input` and `output` buffers may overlap. + fn compute_hash_on_intermediate_buffer( + &mut self, + hash_fn: F, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + ) -> Result<(), sp_sandbox::HostError> + where + F: FnOnce(&[u8]) -> R, + R: AsRef<[u8]>, + { + // Copy input into supervisor memory. + let input = self.read_sandbox_memory(input_ptr, input_len)?; + // Compute the hash on the input buffer using the given hash function. + let hash = hash_fn(&input); + // Write the resulting hash back into the sandboxed output buffer. + self.write_sandbox_memory(output_ptr, hash.as_ref())?; + Ok(()) + } - let ExecError { error, origin } = match from { - Ok(retval) => return Ok(retval.into()), - Err(err) => err, - }; + /// Stores a DispatchError returned from an Ext function into the trap_reason. + /// + /// This allows through supervisor generated errors to the caller. 
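// For orientation, the error funnel implemented by the helpers below can be read as a small
// mapping (a restatement of `err_into_return_code` / `map_dispatch_result` further down, not
// new behaviour):
//
//   Ok(_)                              -> ReturnCode::Success
//   Err(BelowSubsistenceThreshold)     -> ReturnCode::BelowSubsistenceThreshold
//   Err(TransferFailed)                -> ReturnCode::TransferFailed
//   Err(NewContractNotFunded)          -> ReturnCode::NewContractNotFunded
//   Err(CodeNotFound)                  -> ReturnCode::CodeNotFound
//   Err(NotCallable)                   -> ReturnCode::NotCallable
//   any other DispatchError            -> stored via `store_err`, the host call traps
//
// `exec_into_return_code` additionally collapses any error attributed to the callee of a
// sub-call into `ReturnCode::CalleeTrapped`. So, for example, mapping the result of
// `ctx.ext.transfer(&callee, value)` in `seal_transfer` yields
// `Ok(ReturnCode::TransferFailed)` for an ordinary failed transfer instead of trapping.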
+ fn store_err(&mut self, err: Error) -> sp_sandbox::HostError + where + Error: Into, + { + self.trap_reason = Some(TrapReason::SupervisorError(err.into())); + sp_sandbox::HostError + } - match (error, origin) { - (_, Callee) => Ok(ReturnCode::CalleeTrapped), - (err, _) => err_into_return_code::(err) + /// Used by Runtime API that calls into other contracts. + /// + /// Those need to transform the the `ExecResult` returned from the execution into + /// a `ReturnCode`. If this conversion fails because the `ExecResult` constitutes a + /// a fatal error then this error is stored in the `ExecutionContext` so it can be + /// extracted for display in the UI. + fn map_exec_result(&mut self, result: ExecResult) -> Result { + match Self::exec_into_return_code(result) { + Ok(code) => Ok(code), + Err(err) => Err(self.store_err(err)), + } } -} -/// Used by Runtime API that calls into other contracts. -/// -/// Those need to transform the the `ExecResult` returned from the execution into -/// a `ReturnCode`. If this conversion fails because the `ExecResult` constitutes a -/// a fatal error then this error is stored in the `ExecutionContext` so it can be -/// extracted for display in the UI. -fn map_exec_result(ctx: &mut Runtime, result: ExecResult) + /// Try to convert an error into a `ReturnCode`. + /// + /// Used to decide between fatal and non-fatal errors. + fn map_dispatch_result(&mut self, result: Result) -> Result -{ - match exec_into_return_code::(result) { - Ok(code) => Ok(code), - Err(err) => Err(store_err(ctx, err)), + { + let err = if let Err(err) = result { + err + } else { + return Ok(ReturnCode::Success) + }; + + match Self::err_into_return_code(err) { + Ok(code) => Ok(code), + Err(err) => Err(self.store_err(err)), + } } -} -/// Try to convert an error into a `ReturnCode`. -/// -/// Used to decide between fatal and non-fatal errors. -fn map_dispatch_result(ctx: &mut Runtime, result: Result) - -> Result -{ - let err = if let Err(err) = result { - err - } else { - return Ok(ReturnCode::Success) - }; - - match err_into_return_code::(err) { - Ok(code) => Ok(code), - Err(err) => Err(store_err(ctx, err)), + /// Fallible conversion of `DispatchError` to `ReturnCode`. + fn err_into_return_code(from: DispatchError) -> Result { + use ReturnCode::*; + + let below_sub = Error::::BelowSubsistenceThreshold.into(); + let transfer_failed = Error::::TransferFailed.into(); + let not_funded = Error::::NewContractNotFunded.into(); + let no_code = Error::::CodeNotFound.into(); + let invalid_contract = Error::::NotCallable.into(); + + match from { + x if x == below_sub => Ok(BelowSubsistenceThreshold), + x if x == transfer_failed => Ok(TransferFailed), + x if x == not_funded => Ok(NewContractNotFunded), + x if x == no_code => Ok(CodeNotFound), + x if x == invalid_contract => Ok(NotCallable), + err => Err(err) + } + } + + /// Fallible conversion of a `ExecResult` to `ReturnCode`. + fn exec_into_return_code(from: ExecResult) -> Result { + use pallet_contracts_primitives::ErrorOrigin::Callee; + + let ExecError { error, origin } = match from { + Ok(retval) => return Ok(retval.into()), + Err(err) => err, + }; + + match (error, origin) { + (_, Callee) => Ok(ReturnCode::CalleeTrapped), + (err, _) => Self::err_into_return_code(err) + } } } @@ -567,7 +616,7 @@ define_env!(Env, , // // - amount: How much gas is used. 
gas(ctx, amount: u32) => { - charge_gas(ctx, RuntimeToken::MeteringBlock(amount))?; + ctx.charge_gas(RuntimeToken::MeteringBlock(amount))?; Ok(()) }, @@ -587,13 +636,13 @@ define_env!(Env, , // - If value length exceeds the configured maximum value length of a storage entry. // - Upon trying to set an empty storage entry (value length is 0). seal_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) => { - charge_gas(ctx, RuntimeToken::SetStorage(value_len))?; + ctx.charge_gas(RuntimeToken::SetStorage(value_len))?; if value_len > ctx.ext.max_value_size() { - Err(store_err(ctx, Error::::ValueTooLarge))?; + Err(ctx.store_err(Error::::ValueTooLarge))?; } let mut key: StorageKey = [0; 32]; - read_sandbox_memory_into_buf(ctx, key_ptr, &mut key)?; - let value = Some(read_sandbox_memory(ctx, value_ptr, value_len)?); + ctx.read_sandbox_memory_into_buf(key_ptr, &mut key)?; + let value = Some(ctx.read_sandbox_memory(value_ptr, value_len)?); ctx.ext.set_storage(key, value); Ok(()) }, @@ -604,9 +653,9 @@ define_env!(Env, , // // - `key_ptr`: pointer into the linear memory where the location to clear the value is placed. seal_clear_storage(ctx, key_ptr: u32) => { - charge_gas(ctx, RuntimeToken::ClearStorage)?; + ctx.charge_gas(RuntimeToken::ClearStorage)?; let mut key: StorageKey = [0; 32]; - read_sandbox_memory_into_buf(ctx, key_ptr, &mut key)?; + ctx.read_sandbox_memory_into_buf(key_ptr, &mut key)?; ctx.ext.set_storage(key, None); Ok(()) }, @@ -624,11 +673,11 @@ define_env!(Env, , // // `ReturnCode::KeyNotFound` seal_get_storage(ctx, key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { - charge_gas(ctx, RuntimeToken::GetStorageBase)?; + ctx.charge_gas(RuntimeToken::GetStorageBase)?; let mut key: StorageKey = [0; 32]; - read_sandbox_memory_into_buf(ctx, key_ptr, &mut key)?; + ctx.read_sandbox_memory_into_buf(key_ptr, &mut key)?; if let Some(value) = ctx.ext.get_storage(&key) { - write_sandbox_output(ctx, out_ptr, out_len_ptr, &value, false, |len| { + ctx.write_sandbox_output(out_ptr, out_len_ptr, &value, false, |len| { Some(RuntimeToken::GetStorageCopyOut(len)) })?; Ok(ReturnCode::Success) @@ -659,14 +708,14 @@ define_env!(Env, , value_ptr: u32, value_len: u32 ) -> ReturnCode => { - charge_gas(ctx, RuntimeToken::Transfer)?; - let callee: <::T as frame_system::Trait>::AccountId = - read_sandbox_memory_as(ctx, account_ptr, account_len)?; + ctx.charge_gas(RuntimeToken::Transfer)?; + let callee: <::T as frame_system::Config>::AccountId = + ctx.read_sandbox_memory_as(account_ptr, account_len)?; let value: BalanceOf<::T> = - read_sandbox_memory_as(ctx, value_ptr, value_len)?; + ctx.read_sandbox_memory_as(value_ptr, value_len)?; let result = ctx.ext.transfer(&callee, value); - map_dispatch_result(ctx, result) + ctx.map_dispatch_result(result) }, // Make a call to another contract. 
@@ -712,14 +761,14 @@ define_env!(Env, , output_ptr: u32, output_len_ptr: u32 ) -> ReturnCode => { - charge_gas(ctx, RuntimeToken::CallBase(input_data_len))?; - let callee: <::T as frame_system::Trait>::AccountId = - read_sandbox_memory_as(ctx, callee_ptr, callee_len)?; - let value: BalanceOf<::T> = read_sandbox_memory_as(ctx, value_ptr, value_len)?; - let input_data = read_sandbox_memory(ctx, input_data_ptr, input_data_len)?; + ctx.charge_gas(RuntimeToken::CallBase(input_data_len))?; + let callee: <::T as frame_system::Config>::AccountId = + ctx.read_sandbox_memory_as(callee_ptr, callee_len)?; + let value: BalanceOf<::T> = ctx.read_sandbox_memory_as(value_ptr, value_len)?; + let input_data = ctx.read_sandbox_memory(input_data_ptr, input_data_len)?; if value > 0u32.into() { - charge_gas(ctx, RuntimeToken::CallSurchargeTransfer)?; + ctx.charge_gas(RuntimeToken::CallSurchargeTransfer)?; } let nested_gas_limit = if gas == 0 { @@ -744,11 +793,11 @@ define_env!(Env, , }); if let Ok(output) = &call_outcome { - write_sandbox_output(ctx, output_ptr, output_len_ptr, &output.data, true, |len| { + ctx.write_sandbox_output(output_ptr, output_len_ptr, &output.data, true, |len| { Some(RuntimeToken::CallCopyOut(len)) })?; } - map_exec_result(ctx, call_outcome) + ctx.map_exec_result(call_outcome) }, // Instantiate a contract with the specified code hash. @@ -779,6 +828,8 @@ define_env!(Env, , // - output_ptr: a pointer where the output buffer is copied to. // - output_len_ptr: in-out pointer to where the length of the buffer is read from // and the actual length is written to. + // - salt_ptr: Pointer to raw bytes used for address deriviation. See `fn contract_address`. + // - salt_len: length in bytes of the supplied salt. // // # Errors // @@ -806,13 +857,16 @@ define_env!(Env, , address_ptr: u32, address_len_ptr: u32, output_ptr: u32, - output_len_ptr: u32 + output_len_ptr: u32, + salt_ptr: u32, + salt_len: u32 ) -> ReturnCode => { - charge_gas(ctx, RuntimeToken::InstantiateBase(input_data_len))?; + ctx.charge_gas(RuntimeToken::InstantiateBase {input_data_len, salt_len})?; let code_hash: CodeHash<::T> = - read_sandbox_memory_as(ctx, code_hash_ptr, code_hash_len)?; - let value: BalanceOf<::T> = read_sandbox_memory_as(ctx, value_ptr, value_len)?; - let input_data = read_sandbox_memory(ctx, input_data_ptr, input_data_len)?; + ctx.read_sandbox_memory_as(code_hash_ptr, code_hash_len)?; + let value: BalanceOf<::T> = ctx.read_sandbox_memory_as(value_ptr, value_len)?; + let input_data = ctx.read_sandbox_memory(input_data_ptr, input_data_len)?; + let salt = ctx.read_sandbox_memory(salt_ptr, salt_len)?; let nested_gas_limit = if gas == 0 { ctx.gas_meter.gas_left() @@ -827,7 +881,8 @@ define_env!(Env, , &code_hash, value, nested_meter, - input_data + input_data, + &salt, ) } // there is not enough gas to allocate for the nested call. 
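// The up-front charge for `seal_instantiate` above now scales with the salt as well as the
// input, per the `InstantiateBase { input_data_len, salt_len }` token defined earlier in
// this file:
//
//   weight = instantiate
//          + instantiate_per_input_byte * input_data_len
//          + instantiate_per_salt_byte  * salt_len
//
// with every addition and multiplication saturating. As a purely numeric illustration
// (placeholder weights, not the benchmarked values from weights.rs): with
// instantiate = 1_000_000, per_input_byte = 200, per_salt_byte = 50, input_data_len = 128
// and salt_len = 32, the charge is 1_000_000 + 200 * 128 + 50 * 32 = 1_027_200.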
@@ -836,15 +891,15 @@ define_env!(Env, , }); if let Ok((address, output)) = &instantiate_outcome { if !output.flags.contains(ReturnFlags::REVERT) { - write_sandbox_output( - ctx, address_ptr, address_len_ptr, &address.encode(), true, already_charged, + ctx.write_sandbox_output( + address_ptr, address_len_ptr, &address.encode(), true, already_charged, )?; } - write_sandbox_output(ctx, output_ptr, output_len_ptr, &output.data, true, |len| { + ctx.write_sandbox_output(output_ptr, output_len_ptr, &output.data, true, |len| { Some(RuntimeToken::InstantiateCopyOut(len)) })?; } - map_exec_result(ctx, instantiate_outcome.map(|(_id, retval)| retval)) + ctx.map_exec_result(instantiate_outcome.map(|(_id, retval)| retval)) }, // Remove the calling account and transfer remaining balance. @@ -866,20 +921,20 @@ define_env!(Env, , beneficiary_ptr: u32, beneficiary_len: u32 ) => { - charge_gas(ctx, RuntimeToken::Terminate)?; - let beneficiary: <::T as frame_system::Trait>::AccountId = - read_sandbox_memory_as(ctx, beneficiary_ptr, beneficiary_len)?; + ctx.charge_gas(RuntimeToken::Terminate)?; + let beneficiary: <::T as frame_system::Config>::AccountId = + ctx.read_sandbox_memory_as(beneficiary_ptr, beneficiary_len)?; - if let Ok(_) = ctx.ext.terminate(&beneficiary) { + if let Ok(_) = ctx.ext.terminate(&beneficiary).map_err(|e| ctx.store_err(e)) { ctx.trap_reason = Some(TrapReason::Termination); } Err(sp_sandbox::HostError) }, seal_input(ctx, buf_ptr: u32, buf_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::InputBase)?; + ctx.charge_gas(RuntimeToken::InputBase)?; if let Some(input) = ctx.input_data.take() { - write_sandbox_output(ctx, buf_ptr, buf_len_ptr, &input, false, |len| { + ctx.write_sandbox_output(buf_ptr, buf_len_ptr, &input, false, |len| { Some(RuntimeToken::InputCopyOut(len)) }) } else { @@ -905,10 +960,10 @@ define_env!(Env, , // // Using a reserved bit triggers a trap. seal_return(ctx, flags: u32, data_ptr: u32, data_len: u32) => { - charge_gas(ctx, RuntimeToken::Return(data_len))?; + ctx.charge_gas(RuntimeToken::Return(data_len))?; ctx.trap_reason = Some(TrapReason::Return(ReturnData { flags, - data: read_sandbox_memory(ctx, data_ptr, data_len)?, + data: ctx.read_sandbox_memory(data_ptr, data_len)?, })); // The trap mechanism is used to immediately terminate the execution. @@ -928,9 +983,9 @@ define_env!(Env, , // extrinsic will be returned. Otherwise, if this call is initiated by another contract then the // address of the contract will be returned. The value is encoded as T::AccountId. seal_caller(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::Caller)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.caller().encode(), false, already_charged + ctx.charge_gas(RuntimeToken::Caller)?; + ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.caller().encode(), false, already_charged ) }, @@ -941,9 +996,9 @@ define_env!(Env, , // `out_ptr`. This call overwrites it with the size of the value. If the available // space at `out_ptr` is less than the size of the value a trap is triggered. 
seal_address(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::Address)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.address().encode(), false, already_charged + ctx.charge_gas(RuntimeToken::Address)?; + ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.address().encode(), false, already_charged ) }, @@ -961,10 +1016,9 @@ define_env!(Env, , // It is recommended to avoid specifying very small values for `gas` as the prices for a single // gas can be smaller than one. seal_weight_to_fee(ctx, gas: u64, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::WeightToFee)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.get_weight_price(gas).encode(), false, - already_charged + ctx.charge_gas(RuntimeToken::WeightToFee)?; + ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.get_weight_price(gas).encode(), false, already_charged ) }, @@ -977,9 +1031,9 @@ define_env!(Env, , // // The data is encoded as Gas. seal_gas_left(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::GasLeft)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.gas_meter.gas_left().encode(), false, already_charged + ctx.charge_gas(RuntimeToken::GasLeft)?; + ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.gas_meter.gas_left().encode(), false, already_charged ) }, @@ -992,9 +1046,9 @@ define_env!(Env, , // // The data is encoded as T::Balance. seal_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::Balance)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.balance().encode(), false, already_charged + ctx.charge_gas(RuntimeToken::Balance)?; + ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.balance().encode(), false, already_charged ) }, @@ -1007,10 +1061,9 @@ define_env!(Env, , // // The data is encoded as T::Balance. seal_value_transferred(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::ValueTransferred)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.value_transferred().encode(), false, - already_charged + ctx.charge_gas(RuntimeToken::ValueTransferred)?; + ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.value_transferred().encode(), false, already_charged ) }, @@ -1023,14 +1076,13 @@ define_env!(Env, , // // The data is encoded as T::Hash. seal_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::Random)?; + ctx.charge_gas(RuntimeToken::Random)?; if subject_len > ctx.schedule.limits.subject_len { return Err(sp_sandbox::HostError); } - let subject_buf = read_sandbox_memory(ctx, subject_ptr, subject_len)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.random(&subject_buf).encode(), false, - already_charged + let subject_buf = ctx.read_sandbox_memory(subject_ptr, subject_len)?; + ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.random(&subject_buf).encode(), false, already_charged ) }, @@ -1041,9 +1093,9 @@ define_env!(Env, , // `out_ptr`. This call overwrites it with the size of the value. If the available // space at `out_ptr` is less than the size of the value a trap is triggered. 
seal_now(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::Now)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.now().encode(), false, already_charged + ctx.charge_gas(RuntimeToken::Now)?; + ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.now().encode(), false, already_charged ) }, @@ -1051,9 +1103,9 @@ define_env!(Env, , // // The data is encoded as T::Balance. seal_minimum_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::MinimumBalance)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.minimum_balance().encode(), false, already_charged + ctx.charge_gas(RuntimeToken::MinimumBalance)?; + ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.minimum_balance().encode(), false, already_charged ) }, @@ -1073,10 +1125,9 @@ define_env!(Env, , // below the sum of existential deposit and the tombstone deposit. The sum // is commonly referred as subsistence threshold in code. seal_tombstone_deposit(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::TombstoneDeposit)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.tombstone_deposit().encode(), false, - already_charged + ctx.charge_gas(RuntimeToken::TombstoneDeposit)?; + ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.tombstone_deposit().encode(), false, already_charged ) }, @@ -1117,13 +1168,13 @@ define_env!(Env, , delta_ptr: u32, delta_count: u32 ) => { - charge_gas(ctx, RuntimeToken::RestoreTo(delta_count))?; - let dest: <::T as frame_system::Trait>::AccountId = - read_sandbox_memory_as(ctx, dest_ptr, dest_len)?; + ctx.charge_gas(RuntimeToken::RestoreTo(delta_count))?; + let dest: <::T as frame_system::Config>::AccountId = + ctx.read_sandbox_memory_as(dest_ptr, dest_len)?; let code_hash: CodeHash<::T> = - read_sandbox_memory_as(ctx, code_hash_ptr, code_hash_len)?; + ctx.read_sandbox_memory_as(code_hash_ptr, code_hash_len)?; let rent_allowance: BalanceOf<::T> = - read_sandbox_memory_as(ctx, rent_allowance_ptr, rent_allowance_len)?; + ctx.read_sandbox_memory_as(rent_allowance_ptr, rent_allowance_len)?; let delta = { // We can eagerly allocate because we charged for the complete delta count already let mut delta = Vec::with_capacity(delta_count as usize); @@ -1134,7 +1185,7 @@ define_env!(Env, , // Read the delta into the provided buffer and collect it into the buffer. let mut delta_key: StorageKey = [0; KEY_SIZE]; - read_sandbox_memory_into_buf(ctx, key_ptr, &mut delta_key)?; + ctx.read_sandbox_memory_into_buf(key_ptr, &mut delta_key)?; delta.push(delta_key); // Offset key_ptr to the next element. 
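// The hunks below also touch `seal_deposit_event`, which reads the topic vector from sandbox
// memory and bounds-checks it; the `has_duplicates` helper relocated to the top of this file
// presumably backs the duplicate-topic check there (its call site sits in lines these hunks
// do not show). The helper sorts its input in place and then scans adjacent pairs, so it is
// O(n log n), allocation free, and reorders the vector. A small illustrative test
// (hypothetical, not part of this patch):
#[cfg(test)]
#[test]
fn has_duplicates_illustration() {
	// Two equal 32-byte topics are reported as duplicates; note the vector ends up sorted.
	let mut topics = vec![[2u8; 32], [1u8; 32], [2u8; 32]];
	assert!(has_duplicates(&mut topics));
	let mut unique = vec![[1u8; 32], [3u8; 32]];
	assert!(!has_duplicates(&mut unique));
}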
@@ -1149,7 +1200,7 @@ define_env!(Env, , code_hash, rent_allowance, delta, - ) { + ).map_err(|e| ctx.store_err(e)) { ctx.trap_reason = Some(TrapReason::Restoration); } Err(sp_sandbox::HostError) @@ -1166,18 +1217,18 @@ define_env!(Env, , seal_deposit_event(ctx, topics_ptr: u32, topics_len: u32, data_ptr: u32, data_len: u32) => { let num_topic = topics_len .checked_div(sp_std::mem::size_of::>() as u32) - .ok_or_else(|| store_err(ctx, "Zero sized topics are not allowed"))?; - charge_gas(ctx, RuntimeToken::DepositEvent { + .ok_or_else(|| ctx.store_err("Zero sized topics are not allowed"))?; + ctx.charge_gas(RuntimeToken::DepositEvent { num_topic, len: data_len, })?; if data_len > ctx.ext.max_value_size() { - Err(store_err(ctx, Error::::ValueTooLarge))?; + Err(ctx.store_err(Error::::ValueTooLarge))?; } let mut topics: Vec::::T>> = match topics_len { 0 => Vec::new(), - _ => read_sandbox_memory_as(ctx, topics_ptr, topics_len)?, + _ => ctx.read_sandbox_memory_as(topics_ptr, topics_len)?, }; // If there are more than `event_topics`, then trap. @@ -1190,7 +1241,7 @@ define_env!(Env, , return Err(sp_sandbox::HostError); } - let event_data = read_sandbox_memory(ctx, data_ptr, data_len)?; + let event_data = ctx.read_sandbox_memory(data_ptr, data_len)?; ctx.ext.deposit_event(topics, event_data); @@ -1203,9 +1254,9 @@ define_env!(Env, , // Should be decodable as a `T::Balance`. Traps otherwise. // - value_len: length of the value buffer. seal_set_rent_allowance(ctx, value_ptr: u32, value_len: u32) => { - charge_gas(ctx, RuntimeToken::SetRentAllowance)?; + ctx.charge_gas(RuntimeToken::SetRentAllowance)?; let value: BalanceOf<::T> = - read_sandbox_memory_as(ctx, value_ptr, value_len)?; + ctx.read_sandbox_memory_as(value_ptr, value_len)?; ctx.ext.set_rent_allowance(value); Ok(()) @@ -1220,9 +1271,9 @@ define_env!(Env, , // // The data is encoded as T::Balance. seal_rent_allowance(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::RentAllowance)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.rent_allowance().encode(), false, already_charged + ctx.charge_gas(RuntimeToken::RentAllowance)?; + ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.rent_allowance().encode(), false, already_charged ) }, @@ -1230,7 +1281,7 @@ define_env!(Env, , // Only available on `--dev` chains. // This function may be removed at any time, superseded by a more general contract debugging feature. seal_println(ctx, str_ptr: u32, str_len: u32) => { - let data = read_sandbox_memory(ctx, str_ptr, str_len)?; + let data = ctx.read_sandbox_memory(str_ptr, str_len)?; if let Ok(utf8) = core::str::from_utf8(&data) { sp_runtime::print(utf8); } @@ -1244,9 +1295,9 @@ define_env!(Env, , // `out_ptr`. This call overwrites it with the size of the value. If the available // space at `out_ptr` is less than the size of the value a trap is triggered. seal_block_number(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::BlockNumber)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.block_number().encode(), false, already_charged + ctx.charge_gas(RuntimeToken::BlockNumber)?; + ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.block_number().encode(), false, already_charged ) }, @@ -1271,8 +1322,8 @@ define_env!(Env, , // data is placed. The function will write the result // directly into this buffer. 
seal_hash_sha2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { - charge_gas(ctx, RuntimeToken::HashSha256(input_len))?; - compute_hash_on_intermediate_buffer(ctx, sha2_256, input_ptr, input_len, output_ptr) + ctx.charge_gas(RuntimeToken::HashSha256(input_len))?; + ctx.compute_hash_on_intermediate_buffer(sha2_256, input_ptr, input_len, output_ptr) }, // Computes the KECCAK 256-bit hash on the given input buffer. @@ -1296,8 +1347,8 @@ define_env!(Env, , // data is placed. The function will write the result // directly into this buffer. seal_hash_keccak_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { - charge_gas(ctx, RuntimeToken::HashKeccak256(input_len))?; - compute_hash_on_intermediate_buffer(ctx, keccak_256, input_ptr, input_len, output_ptr) + ctx.charge_gas(RuntimeToken::HashKeccak256(input_len))?; + ctx.compute_hash_on_intermediate_buffer(keccak_256, input_ptr, input_len, output_ptr) }, // Computes the BLAKE2 256-bit hash on the given input buffer. @@ -1321,8 +1372,8 @@ define_env!(Env, , // data is placed. The function will write the result // directly into this buffer. seal_hash_blake2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { - charge_gas(ctx, RuntimeToken::HashBlake256(input_len))?; - compute_hash_on_intermediate_buffer(ctx, blake2_256, input_ptr, input_len, output_ptr) + ctx.charge_gas(RuntimeToken::HashBlake256(input_len))?; + ctx.compute_hash_on_intermediate_buffer(blake2_256, input_ptr, input_len, output_ptr) }, // Computes the BLAKE2 128-bit hash on the given input buffer. @@ -1346,62 +1397,7 @@ define_env!(Env, , // data is placed. The function will write the result // directly into this buffer. seal_hash_blake2_128(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { - charge_gas(ctx, RuntimeToken::HashBlake128(input_len))?; - compute_hash_on_intermediate_buffer(ctx, blake2_128, input_ptr, input_len, output_ptr) + ctx.charge_gas(RuntimeToken::HashBlake128(input_len))?; + ctx.compute_hash_on_intermediate_buffer(blake2_128, input_ptr, input_len, output_ptr) }, ); - -/// Computes the given hash function on the supplied input. -/// -/// Reads from the sandboxed input buffer into an intermediate buffer. -/// Returns the result directly to the output buffer of the sandboxed memory. -/// -/// It is the callers responsibility to provide an output buffer that -/// is large enough to hold the expected amount of bytes returned by the -/// chosen hash function. -/// -/// # Note -/// -/// The `input` and `output` buffers may overlap. -fn compute_hash_on_intermediate_buffer( - ctx: &mut Runtime, - hash_fn: F, - input_ptr: u32, - input_len: u32, - output_ptr: u32, -) -> Result<(), sp_sandbox::HostError> -where - E: Ext, - F: FnOnce(&[u8]) -> R, - R: AsRef<[u8]>, -{ - // Copy input into supervisor memory. - let input = read_sandbox_memory(ctx, input_ptr, input_len)?; - // Compute the hash on the input buffer using the given hash function. - let hash = hash_fn(&input); - // Write the resulting hash back into the sandboxed output buffer. - write_sandbox_memory( - ctx, - output_ptr, - hash.as_ref(), - )?; - Ok(()) -} - -/// Finds duplicates in a given vector. -/// -/// This function has complexity of O(n log n) and no additional memory is required, although -/// the order of items is not preserved. -fn has_duplicates>(items: &mut Vec) -> bool { - // Sort the vector - items.sort_by(|a, b| { - Ord::cmp(a.as_ref(), b.as_ref()) - }); - // And then find any two consecutive equal elements. 
- items.windows(2).any(|w| { - match w { - &[ref a, ref b] => a == b, - _ => false, - } - }) -} diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index a84acbfd79ace7eb135e2711a89b1c8fc1d9ff4b..24c1273a44ffb94103b4380a5fbc6d719b3e8f6a 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -17,7 +17,7 @@ //! Weights for pallet_contracts //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-11-10, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -45,7 +45,7 @@ use sp_std::marker::PhantomData; pub trait WeightInfo { fn update_schedule() -> Weight; fn put_code(n: u32, ) -> Weight; - fn instantiate(n: u32, ) -> Weight; + fn instantiate(n: u32, s: u32, ) -> Weight; fn call() -> Weight; fn claim_surcharge() -> Weight; fn seal_caller(r: u32, ) -> Weight; @@ -80,7 +80,7 @@ pub trait WeightInfo { fn seal_call(r: u32, ) -> Weight; fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight; fn seal_instantiate(r: u32, ) -> Weight; - fn seal_instantiate_per_input_output_kb(i: u32, o: u32, ) -> Weight; + fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight; fn seal_hash_sha2_256(r: u32, ) -> Weight; fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight; fn seal_hash_keccak_256(r: u32, ) -> Weight; @@ -140,228 +140,201 @@ pub trait WeightInfo { fn instr_i64shru(r: u32, ) -> Weight; fn instr_i64rotl(r: u32, ) -> Weight; fn instr_i64rotr(r: u32, ) -> Weight; - } /// Weights for pallet_contracts using the Substrate node and recommended hardware. 
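// Illustrative sketch (not part of the patch): the free function
// `compute_hash_on_intermediate_buffer` removed above reappears as a method on
// the runtime context elsewhere in this patch (see the `ctx.compute_hash_on_intermediate_buffer`
// call sites), and the technique itself is unchanged: read the input out of
// guest memory into a supervisor-side buffer, hash that copy, then write the
// digest back, so the input and output ranges may overlap. Below, a plain byte
// slice stands in for the sandboxed memory and the error type is a bare string
// instead of `sp_sandbox::HostError`.
fn compute_hash_on_intermediate_buffer<F, R>(
    memory: &mut [u8],
    hash_fn: F,
    input_ptr: usize,
    input_len: usize,
    output_ptr: usize,
) -> Result<(), &'static str>
where
    F: FnOnce(&[u8]) -> R,
    R: AsRef<[u8]>,
{
    // Copy the input into an intermediate buffer owned by the supervisor.
    let input = memory
        .get(input_ptr..input_ptr + input_len)
        .ok_or("out-of-bounds read")?
        .to_vec();
    // Hash the copy with whatever hash function the host function selected.
    let hash = hash_fn(&input[..]);
    let out = hash.as_ref();
    // Write the digest back into (possibly overlapping) guest memory.
    memory
        .get_mut(output_ptr..output_ptr + out.len())
        .ok_or("out-of-bounds write")?
        .copy_from_slice(out);
    Ok(())
}

fn main() {
    let mut mem = vec![0u8; 16];
    mem[..4].copy_from_slice(&[1, 2, 3, 4]);
    // Stand-in "hash": XOR of the input bytes (the real host functions pass
    // sha2_256 / keccak_256 / blake2_256 / blake2_128 here).
    let xor = |data: &[u8]| [data.iter().fold(0u8, |acc, b| acc ^ b)];
    compute_hash_on_intermediate_buffer(&mut mem, xor, 0, 4, 8).unwrap();
    assert_eq!(mem[8], 4); // 1 ^ 2 ^ 3 ^ 4
}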
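// Illustrative sketch (not part of the patch) of the removed `has_duplicates`
// helper: sort the items, then any duplicate must show up as two equal
// neighbours, giving O(n log n) time with no extra allocation (at the cost of
// reordering the input). The contracts runtime uses this idea to reject
// duplicate event topics.
fn has_duplicates<T: AsRef<[u8]>>(items: &mut [T]) -> bool {
    // Sort by byte content, then scan adjacent pairs for equality.
    items.sort_by(|a, b| Ord::cmp(a.as_ref(), b.as_ref()));
    items.windows(2).any(|w| w[0].as_ref() == w[1].as_ref())
}

fn main() {
    let mut topics = vec![vec![7u8, 7], vec![1, 2], vec![7, 7]];
    assert!(has_duplicates(&mut topics));
    let mut distinct = vec![vec![1u8], vec![2], vec![3]];
    assert!(!has_duplicates(&mut distinct));
}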
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn update_schedule() -> Weight { - (33_160_000 as Weight) + (35_214_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn put_code(n: u32, ) -> Weight { - (5_975_000 as Weight) - .saturating_add((108_953_000 as Weight).saturating_mul(n as Weight)) + (0 as Weight) + .saturating_add((109_242_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn instantiate(n: u32, ) -> Weight { - (218_223_000 as Weight) - .saturating_add((1_007_000 as Weight).saturating_mul(n as Weight)) + fn instantiate(n: u32, s: u32, ) -> Weight { + (195_276_000 as Weight) + .saturating_add((35_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((2_244_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } fn call() -> Weight { - (201_492_000 as Weight) + (207_142_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn claim_surcharge() -> Weight { - (449_203_000 as Weight) + (489_633_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn seal_caller(r: u32, ) -> Weight { - (136_650_000 as Weight) - .saturating_add((364_640_000 as Weight).saturating_mul(r as Weight)) + (136_550_000 as Weight) + .saturating_add((373_182_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_address(r: u32, ) -> Weight { - (144_167_000 as Weight) - .saturating_add((365_328_000 as Weight).saturating_mul(r as Weight)) + (136_329_000 as Weight) + .saturating_add((373_392_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_gas_left(r: u32, ) -> Weight { - (138_458_000 as Weight) - .saturating_add((361_076_000 as Weight).saturating_mul(r as Weight)) + (111_577_000 as Weight) + .saturating_add((373_536_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_balance(r: u32, ) -> Weight { - (147_909_000 as Weight) - .saturating_add((792_169_000 as Weight).saturating_mul(r as Weight)) + (157_531_000 as Weight) + .saturating_add((810_382_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) - } fn seal_value_transferred(r: u32, ) -> Weight { - (148_524_000 as Weight) - .saturating_add((361_842_000 as Weight).saturating_mul(r as Weight)) + (143_801_000 as Weight) + .saturating_add((369_769_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_minimum_balance(r: u32, ) -> Weight { - (139_795_000 as Weight) - .saturating_add((366_013_000 as Weight).saturating_mul(r as Weight)) + (133_546_000 as Weight) + .saturating_add((370_036_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_tombstone_deposit(r: u32, ) -> Weight { - (140_557_000 as Weight) - .saturating_add((362_687_000 as Weight).saturating_mul(r as Weight)) + (138_568_000 as Weight) + .saturating_add((370_322_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) 
- } fn seal_rent_allowance(r: u32, ) -> Weight { - (152_989_000 as Weight) - .saturating_add((836_876_000 as Weight).saturating_mul(r as Weight)) + (144_431_000 as Weight) + .saturating_add((851_810_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_block_number(r: u32, ) -> Weight { - (140_228_000 as Weight) - .saturating_add((360_561_000 as Weight).saturating_mul(r as Weight)) + (133_237_000 as Weight) + .saturating_add((369_156_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_now(r: u32, ) -> Weight { - (148_776_000 as Weight) - .saturating_add((361_712_000 as Weight).saturating_mul(r as Weight)) + (139_700_000 as Weight) + .saturating_add((368_961_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_weight_to_fee(r: u32, ) -> Weight { - (126_903_000 as Weight) - .saturating_add((603_100_000 as Weight).saturating_mul(r as Weight)) + (149_395_000 as Weight) + .saturating_add((625_812_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) - } fn seal_gas(r: u32, ) -> Weight { - (125_712_000 as Weight) - .saturating_add((184_450_000 as Weight).saturating_mul(r as Weight)) + (125_777_000 as Weight) + .saturating_add((187_585_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_input(r: u32, ) -> Weight { - (136_175_000 as Weight) - .saturating_add((7_489_000 as Weight).saturating_mul(r as Weight)) + (132_584_000 as Weight) + .saturating_add((7_661_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_input_per_kb(n: u32, ) -> Weight { - (145_434_000 as Weight) - .saturating_add((276_000 as Weight).saturating_mul(n as Weight)) + (143_408_000 as Weight) + .saturating_add((274_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_return(r: u32, ) -> Weight { - (124_788_000 as Weight) - .saturating_add((5_696_000 as Weight).saturating_mul(r as Weight)) + (126_257_000 as Weight) + .saturating_add((5_455_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_return_per_kb(n: u32, ) -> Weight { - (133_483_000 as Weight) - .saturating_add((675_000 as Weight).saturating_mul(n as Weight)) + (133_286_000 as Weight) + .saturating_add((698_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_terminate(r: u32, ) -> Weight { - (135_387_000 as Weight) - .saturating_add((338_395_000 as Weight).saturating_mul(r as Weight)) + (130_607_000 as Weight) + .saturating_add((358_370_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) } fn seal_restore_to(r: u32, ) -> Weight { - (227_617_000 as Weight) - .saturating_add((132_493_000 as Weight).saturating_mul(r as Weight)) + (233_645_000 as Weight) + .saturating_add((135_355_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes((4 as Weight).saturating_mul(r as Weight))) } fn 
seal_restore_to_per_delta(d: u32, ) -> Weight { - (15_263_000 as Weight) - .saturating_add((3_732_219_000 as Weight).saturating_mul(d as Weight)) + (74_573_000 as Weight) + .saturating_add((3_768_682_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) .saturating_add(T::DbWeight::get().writes(5 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(d as Weight))) } fn seal_random(r: u32, ) -> Weight { - (236_391_000 as Weight) - .saturating_add((913_452_000 as Weight).saturating_mul(r as Weight)) + (140_286_000 as Weight) + .saturating_add((950_890_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) - } fn seal_deposit_event(r: u32, ) -> Weight { - (140_845_000 as Weight) - .saturating_add((1_322_796_000 as Weight).saturating_mul(r as Weight)) + (167_735_000 as Weight) + .saturating_add((1_375_429_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (1_651_556_000 as Weight) - .saturating_add((737_421_000 as Weight).saturating_mul(t as Weight)) - .saturating_add((244_183_000 as Weight).saturating_mul(n as Weight)) + (1_715_857_000 as Weight) + .saturating_add((760_777_000 as Weight).saturating_mul(t as Weight)) + .saturating_add((241_853_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(t as Weight))) } fn seal_set_rent_allowance(r: u32, ) -> Weight { - (151_091_000 as Weight) - .saturating_add((983_375_000 as Weight).saturating_mul(r as Weight)) + (156_911_000 as Weight) + .saturating_add((1_006_139_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn seal_set_storage(r: u32, ) -> Weight { - (460_478_000 as Weight) - .saturating_add((14_824_033_000 as Weight).saturating_mul(r as Weight)) + (0 as Weight) + .saturating_add((14_938_793_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (2_255_458_000 as Weight) - .saturating_add((204_470_000 as Weight).saturating_mul(n as Weight)) + (2_300_169_000 as Weight) + .saturating_add((204_543_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn seal_clear_storage(r: u32, ) -> Weight { (0 as Weight) - .saturating_add((5_052_125_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((5_140_241_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } fn seal_get_storage(r: u32, ) -> Weight { - (95_473_000 
as Weight) - .saturating_add((1_044_784_000 as Weight).saturating_mul(r as Weight)) + (45_212_000 as Weight) + .saturating_add((1_131_504_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - } fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (860_080_000 as Weight) - .saturating_add((146_913_000 as Weight).saturating_mul(n as Weight)) + (885_531_000 as Weight) + .saturating_add((148_986_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) - } fn seal_transfer(r: u32, ) -> Weight { - (107_119_000 as Weight) - .saturating_add((5_993_434_000 as Weight).saturating_mul(r as Weight)) + (92_276_000 as Weight) + .saturating_add((6_216_852_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -369,560 +342,473 @@ impl WeightInfo for SubstrateWeight { } fn seal_call(r: u32, ) -> Weight { (0 as Weight) - .saturating_add((10_533_320_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((10_734_719_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - } fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { - (9_839_633_000 as Weight) - .saturating_add((5_580_035_000 as Weight).saturating_mul(t as Weight)) - .saturating_add((53_716_000 as Weight).saturating_mul(i as Weight)) - .saturating_add((73_668_000 as Weight).saturating_mul(o as Weight)) + (12_735_614_000 as Weight) + .saturating_add((2_870_730_000 as Weight).saturating_mul(t as Weight)) + .saturating_add((52_569_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((73_956_000 as Weight).saturating_mul(o as Weight)) .saturating_add(T::DbWeight::get().reads(105 as Weight)) .saturating_add(T::DbWeight::get().reads((101 as Weight).saturating_mul(t as Weight))) .saturating_add(T::DbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) } fn seal_instantiate(r: u32, ) -> Weight { (0 as Weight) - .saturating_add((21_856_497_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((22_365_908_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((200 as Weight).saturating_mul(r as Weight))) } - fn seal_instantiate_per_input_output_kb(i: u32, o: u32, ) -> Weight { - (18_796_671_000 as Weight) - .saturating_add((156_269_000 as Weight).saturating_mul(i as Weight)) - .saturating_add((74_645_000 as Weight).saturating_mul(o as Weight)) + fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight { + (18_899_296_000 as Weight) + .saturating_add((53_289_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((76_026_000 as Weight).saturating_mul(o as Weight)) + .saturating_add((281_097_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(207 as Weight)) .saturating_add(T::DbWeight::get().writes(202 as Weight)) - } fn seal_hash_sha2_256(r: u32, ) -> Weight { - (132_190_000 as Weight) - .saturating_add((319_943_000 as 
Weight).saturating_mul(r as Weight)) + (136_601_000 as Weight) + .saturating_add((323_373_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (747_208_000 as Weight) - .saturating_add((421_808_000 as Weight).saturating_mul(n as Weight)) + (777_563_000 as Weight) + .saturating_add((423_353_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_hash_keccak_256(r: u32, ) -> Weight { - (139_235_000 as Weight) - .saturating_add((333_792_000 as Weight).saturating_mul(r as Weight)) + (136_771_000 as Weight) + .saturating_add((337_881_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (543_256_000 as Weight) - .saturating_add((334_383_000 as Weight).saturating_mul(n as Weight)) + (337_906_000 as Weight) + .saturating_add((336_778_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_hash_blake2_256(r: u32, ) -> Weight { - (142_704_000 as Weight) - .saturating_add((305_513_000 as Weight).saturating_mul(r as Weight)) + (131_040_000 as Weight) + .saturating_add((312_992_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (592_813_000 as Weight) - .saturating_add((151_270_000 as Weight).saturating_mul(n as Weight)) + (693_415_000 as Weight) + .saturating_add((152_745_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_hash_blake2_128(r: u32, ) -> Weight { - (139_921_000 as Weight) - .saturating_add((304_746_000 as Weight).saturating_mul(r as Weight)) + (135_654_000 as Weight) + .saturating_add((311_271_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (544_524_000 as Weight) - .saturating_add((151_549_000 as Weight).saturating_mul(n as Weight)) + (839_521_000 as Weight) + .saturating_add((153_146_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } fn instr_i64const(r: u32, ) -> Weight { - (24_652_000 as Weight) - .saturating_add((3_306_000 as Weight).saturating_mul(r as Weight)) - + (26_679_000 as Weight) + .saturating_add((3_155_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64load(r: u32, ) -> Weight { - (27_131_000 as Weight) - .saturating_add((162_220_000 as Weight).saturating_mul(r as Weight)) - + (28_920_000 as Weight) + .saturating_add((159_343_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64store(r: u32, ) -> Weight { - (27_086_000 as Weight) - .saturating_add((230_977_000 as Weight).saturating_mul(r as Weight)) - + (28_928_000 as Weight) + .saturating_add((227_286_000 as Weight).saturating_mul(r as Weight)) } fn instr_select(r: u32, ) -> Weight { - (24_656_000 as Weight) - .saturating_add((12_570_000 as Weight).saturating_mul(r as Weight)) - + (26_591_000 as Weight) + .saturating_add((12_591_000 as Weight).saturating_mul(r as Weight)) } fn instr_if(r: u32, ) -> Weight { - (24_643_000 as Weight) - .saturating_add((12_442_000 as Weight).saturating_mul(r as Weight)) - + (26_597_000 as Weight) + .saturating_add((12_258_000 as Weight).saturating_mul(r as Weight)) } fn instr_br(r: u32, ) -> Weight { - (24_589_000 as Weight) - 
.saturating_add((6_237_000 as Weight).saturating_mul(r as Weight)) - + (26_586_000 as Weight) + .saturating_add((5_811_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_if(r: u32, ) -> Weight { - (24_650_000 as Weight) - .saturating_add((14_393_000 as Weight).saturating_mul(r as Weight)) - + (26_581_000 as Weight) + .saturating_add((14_058_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table(r: u32, ) -> Weight { - (24_689_000 as Weight) - .saturating_add((15_706_000 as Weight).saturating_mul(r as Weight)) - + (26_615_000 as Weight) + .saturating_add((15_687_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table_per_entry(e: u32, ) -> Weight { - (40_129_000 as Weight) - .saturating_add((83_000 as Weight).saturating_mul(e as Weight)) - + (40_963_000 as Weight) + .saturating_add((92_000 as Weight).saturating_mul(e as Weight)) } fn instr_call(r: u32, ) -> Weight { - (24_904_000 as Weight) - .saturating_add((96_429_000 as Weight).saturating_mul(r as Weight)) - + (26_880_000 as Weight) + .saturating_add((97_523_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect(r: u32, ) -> Weight { - (32_540_000 as Weight) - .saturating_add((201_773_000 as Weight).saturating_mul(r as Weight)) - + (34_628_000 as Weight) + .saturating_add((201_913_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect_per_param(p: u32, ) -> Weight { - (248_700_000 as Weight) - .saturating_add((3_705_000 as Weight).saturating_mul(p as Weight)) - + (255_763_000 as Weight) + .saturating_add((3_612_000 as Weight).saturating_mul(p as Weight)) } fn instr_local_get(r: u32, ) -> Weight { - (42_081_000 as Weight) - .saturating_add((3_548_000 as Weight).saturating_mul(r as Weight)) - + (45_954_000 as Weight) + .saturating_add((3_439_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_set(r: u32, ) -> Weight { - (42_128_000 as Weight) - .saturating_add((3_678_000 as Weight).saturating_mul(r as Weight)) - + (45_952_000 as Weight) + .saturating_add((3_601_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_tee(r: u32, ) -> Weight { - (42_073_000 as Weight) - .saturating_add((5_212_000 as Weight).saturating_mul(r as Weight)) - + (45_883_000 as Weight) + .saturating_add((5_203_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_get(r: u32, ) -> Weight { - (28_182_000 as Weight) - .saturating_add((8_180_000 as Weight).saturating_mul(r as Weight)) - + (29_895_000 as Weight) + .saturating_add((8_221_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_set(r: u32, ) -> Weight { - (28_060_000 as Weight) - .saturating_add((12_081_000 as Weight).saturating_mul(r as Weight)) - + (29_916_000 as Weight) + .saturating_add((12_036_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_current(r: u32, ) -> Weight { - (27_113_000 as Weight) - .saturating_add((3_802_000 as Weight).saturating_mul(r as Weight)) - + (28_878_000 as Weight) + .saturating_add((3_794_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_grow(r: u32, ) -> Weight { - (25_521_000 as Weight) - .saturating_add((2_288_295_000 as Weight).saturating_mul(r as Weight)) - + (27_351_000 as Weight) + .saturating_add((2_302_301_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64clz(r: u32, ) -> Weight { - (24_662_000 as Weight) - .saturating_add((5_497_000 as Weight).saturating_mul(r as Weight)) - + (26_535_000 as Weight) + .saturating_add((5_450_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ctz(r: u32, ) -> Weight { - (24_647_000 as Weight) - 
.saturating_add((5_556_000 as Weight).saturating_mul(r as Weight)) - + (26_489_000 as Weight) + .saturating_add((5_410_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64popcnt(r: u32, ) -> Weight { - (24_646_000 as Weight) - .saturating_add((6_138_000 as Weight).saturating_mul(r as Weight)) - + (26_576_000 as Weight) + .saturating_add((5_976_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eqz(r: u32, ) -> Weight { - (24_649_000 as Weight) - .saturating_add((5_477_000 as Weight).saturating_mul(r as Weight)) - + (26_521_000 as Weight) + .saturating_add((5_465_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendsi32(r: u32, ) -> Weight { - (24_655_000 as Weight) - .saturating_add((5_414_000 as Weight).saturating_mul(r as Weight)) - + (26_534_000 as Weight) + .saturating_add((5_375_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendui32(r: u32, ) -> Weight { - (24_619_000 as Weight) - .saturating_add((5_434_000 as Weight).saturating_mul(r as Weight)) - + (26_560_000 as Weight) + .saturating_add((5_284_000 as Weight).saturating_mul(r as Weight)) } fn instr_i32wrapi64(r: u32, ) -> Weight { - (24_654_000 as Weight) - .saturating_add((5_483_000 as Weight).saturating_mul(r as Weight)) - + (26_554_000 as Weight) + .saturating_add((5_358_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eq(r: u32, ) -> Weight { - (24_690_000 as Weight) - .saturating_add((7_485_000 as Weight).saturating_mul(r as Weight)) - + (26_549_000 as Weight) + .saturating_add((7_402_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ne(r: u32, ) -> Weight { - (24_652_000 as Weight) - .saturating_add((7_468_000 as Weight).saturating_mul(r as Weight)) - + (26_582_000 as Weight) + .saturating_add((7_266_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64lts(r: u32, ) -> Weight { - (24_667_000 as Weight) - .saturating_add((7_426_000 as Weight).saturating_mul(r as Weight)) - + (26_558_000 as Weight) + .saturating_add((7_293_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ltu(r: u32, ) -> Weight { - (24_693_000 as Weight) - .saturating_add((7_393_000 as Weight).saturating_mul(r as Weight)) - + (26_569_000 as Weight) + .saturating_add((7_278_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gts(r: u32, ) -> Weight { - (24_675_000 as Weight) - .saturating_add((7_407_000 as Weight).saturating_mul(r as Weight)) - + (26_516_000 as Weight) + .saturating_add((7_334_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gtu(r: u32, ) -> Weight { - (24_697_000 as Weight) - .saturating_add((7_392_000 as Weight).saturating_mul(r as Weight)) - + (26_561_000 as Weight) + .saturating_add((7_283_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64les(r: u32, ) -> Weight { - (24_646_000 as Weight) - .saturating_add((7_420_000 as Weight).saturating_mul(r as Weight)) - + (26_589_000 as Weight) + .saturating_add((7_244_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64leu(r: u32, ) -> Weight { - (24_683_000 as Weight) - .saturating_add((7_404_000 as Weight).saturating_mul(r as Weight)) - + (26_593_000 as Weight) + .saturating_add((7_318_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ges(r: u32, ) -> Weight { - (24_685_000 as Weight) - .saturating_add((7_461_000 as Weight).saturating_mul(r as Weight)) - + (26_626_000 as Weight) + .saturating_add((7_348_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64geu(r: u32, ) -> Weight { - (25_147_000 as Weight) - .saturating_add((7_003_000 as Weight).saturating_mul(r as Weight)) - + (26_595_000 as 
Weight) + .saturating_add((7_330_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64add(r: u32, ) -> Weight { - (24_705_000 as Weight) - .saturating_add((7_483_000 as Weight).saturating_mul(r as Weight)) - + (26_568_000 as Weight) + .saturating_add((8_657_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64sub(r: u32, ) -> Weight { - (24_675_000 as Weight) - .saturating_add((7_377_000 as Weight).saturating_mul(r as Weight)) - + (27_393_000 as Weight) + .saturating_add((6_743_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64mul(r: u32, ) -> Weight { - (24_680_000 as Weight) - .saturating_add((7_376_000 as Weight).saturating_mul(r as Weight)) - + (26_571_000 as Weight) + .saturating_add((7_329_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divs(r: u32, ) -> Weight { - (24_660_000 as Weight) - .saturating_add((13_091_000 as Weight).saturating_mul(r as Weight)) - + (26_585_000 as Weight) + .saturating_add((12_977_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divu(r: u32, ) -> Weight { - (24_643_000 as Weight) - .saturating_add((12_109_000 as Weight).saturating_mul(r as Weight)) - + (26_554_000 as Weight) + .saturating_add((11_955_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rems(r: u32, ) -> Weight { - (24_615_000 as Weight) - .saturating_add((13_049_000 as Weight).saturating_mul(r as Weight)) - + (26_570_000 as Weight) + .saturating_add((12_903_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64remu(r: u32, ) -> Weight { - (24_696_000 as Weight) - .saturating_add((12_039_000 as Weight).saturating_mul(r as Weight)) - + (26_561_000 as Weight) + .saturating_add((12_112_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64and(r: u32, ) -> Weight { - (24_683_000 as Weight) - .saturating_add((7_314_000 as Weight).saturating_mul(r as Weight)) - + (26_587_000 as Weight) + .saturating_add((7_411_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64or(r: u32, ) -> Weight { - (24_657_000 as Weight) - .saturating_add((7_401_000 as Weight).saturating_mul(r as Weight)) - + (26_588_000 as Weight) + .saturating_add((7_479_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64xor(r: u32, ) -> Weight { - (24_661_000 as Weight) - .saturating_add((7_347_000 as Weight).saturating_mul(r as Weight)) - + (26_541_000 as Weight) + .saturating_add((7_386_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shl(r: u32, ) -> Weight { - (24_644_000 as Weight) - .saturating_add((7_389_000 as Weight).saturating_mul(r as Weight)) - + (26_562_000 as Weight) + .saturating_add((7_263_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shrs(r: u32, ) -> Weight { - (24_643_000 as Weight) - .saturating_add((7_416_000 as Weight).saturating_mul(r as Weight)) - + (26_569_000 as Weight) + .saturating_add((7_353_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shru(r: u32, ) -> Weight { - (24_634_000 as Weight) - .saturating_add((7_392_000 as Weight).saturating_mul(r as Weight)) - + (26_533_000 as Weight) + .saturating_add((7_342_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotl(r: u32, ) -> Weight { - (24_618_000 as Weight) - .saturating_add((7_452_000 as Weight).saturating_mul(r as Weight)) - + (26_545_000 as Weight) + .saturating_add((7_362_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotr(r: u32, ) -> Weight { - (24_618_000 as Weight) - .saturating_add((7_447_000 as Weight).saturating_mul(r as Weight)) - + (26_535_000 as Weight) + .saturating_add((7_330_000 as Weight).saturating_mul(r as Weight)) } - } // For 
backwards compatibility and tests impl WeightInfo for () { fn update_schedule() -> Weight { - (33_160_000 as Weight) + (35_214_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn put_code(n: u32, ) -> Weight { - (5_975_000 as Weight) - .saturating_add((108_953_000 as Weight).saturating_mul(n as Weight)) + (0 as Weight) + .saturating_add((109_242_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } - fn instantiate(n: u32, ) -> Weight { - (218_223_000 as Weight) - .saturating_add((1_007_000 as Weight).saturating_mul(n as Weight)) + fn instantiate(n: u32, s: u32, ) -> Weight { + (195_276_000 as Weight) + .saturating_add((35_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((2_244_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } fn call() -> Weight { - (201_492_000 as Weight) + (207_142_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn claim_surcharge() -> Weight { - (449_203_000 as Weight) + (489_633_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn seal_caller(r: u32, ) -> Weight { - (136_650_000 as Weight) - .saturating_add((364_640_000 as Weight).saturating_mul(r as Weight)) + (136_550_000 as Weight) + .saturating_add((373_182_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_address(r: u32, ) -> Weight { - (144_167_000 as Weight) - .saturating_add((365_328_000 as Weight).saturating_mul(r as Weight)) + (136_329_000 as Weight) + .saturating_add((373_392_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_gas_left(r: u32, ) -> Weight { - (138_458_000 as Weight) - .saturating_add((361_076_000 as Weight).saturating_mul(r as Weight)) + (111_577_000 as Weight) + .saturating_add((373_536_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_balance(r: u32, ) -> Weight { - (147_909_000 as Weight) - .saturating_add((792_169_000 as Weight).saturating_mul(r as Weight)) + (157_531_000 as Weight) + .saturating_add((810_382_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - } fn seal_value_transferred(r: u32, ) -> Weight { - (148_524_000 as Weight) - .saturating_add((361_842_000 as Weight).saturating_mul(r as Weight)) + (143_801_000 as Weight) + .saturating_add((369_769_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_minimum_balance(r: u32, ) -> Weight { - (139_795_000 as Weight) - .saturating_add((366_013_000 as Weight).saturating_mul(r as Weight)) + (133_546_000 as Weight) + .saturating_add((370_036_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_tombstone_deposit(r: u32, ) -> Weight { - (140_557_000 as Weight) - .saturating_add((362_687_000 as Weight).saturating_mul(r as Weight)) + (138_568_000 as Weight) + .saturating_add((370_322_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn 
seal_rent_allowance(r: u32, ) -> Weight { - (152_989_000 as Weight) - .saturating_add((836_876_000 as Weight).saturating_mul(r as Weight)) + (144_431_000 as Weight) + .saturating_add((851_810_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_block_number(r: u32, ) -> Weight { - (140_228_000 as Weight) - .saturating_add((360_561_000 as Weight).saturating_mul(r as Weight)) + (133_237_000 as Weight) + .saturating_add((369_156_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_now(r: u32, ) -> Weight { - (148_776_000 as Weight) - .saturating_add((361_712_000 as Weight).saturating_mul(r as Weight)) + (139_700_000 as Weight) + .saturating_add((368_961_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_weight_to_fee(r: u32, ) -> Weight { - (126_903_000 as Weight) - .saturating_add((603_100_000 as Weight).saturating_mul(r as Weight)) + (149_395_000 as Weight) + .saturating_add((625_812_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - } fn seal_gas(r: u32, ) -> Weight { - (125_712_000 as Weight) - .saturating_add((184_450_000 as Weight).saturating_mul(r as Weight)) + (125_777_000 as Weight) + .saturating_add((187_585_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_input(r: u32, ) -> Weight { - (136_175_000 as Weight) - .saturating_add((7_489_000 as Weight).saturating_mul(r as Weight)) + (132_584_000 as Weight) + .saturating_add((7_661_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_input_per_kb(n: u32, ) -> Weight { - (145_434_000 as Weight) - .saturating_add((276_000 as Weight).saturating_mul(n as Weight)) + (143_408_000 as Weight) + .saturating_add((274_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_return(r: u32, ) -> Weight { - (124_788_000 as Weight) - .saturating_add((5_696_000 as Weight).saturating_mul(r as Weight)) + (126_257_000 as Weight) + .saturating_add((5_455_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_return_per_kb(n: u32, ) -> Weight { - (133_483_000 as Weight) - .saturating_add((675_000 as Weight).saturating_mul(n as Weight)) + (133_286_000 as Weight) + .saturating_add((698_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_terminate(r: u32, ) -> Weight { - (135_387_000 as Weight) - .saturating_add((338_395_000 as Weight).saturating_mul(r as Weight)) + (130_607_000 as Weight) + .saturating_add((358_370_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) } fn seal_restore_to(r: u32, ) -> Weight { - (227_617_000 as Weight) - .saturating_add((132_493_000 as Weight).saturating_mul(r as Weight)) + (233_645_000 as Weight) + .saturating_add((135_355_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes((4 as Weight).saturating_mul(r as 
Weight))) } fn seal_restore_to_per_delta(d: u32, ) -> Weight { - (15_263_000 as Weight) - .saturating_add((3_732_219_000 as Weight).saturating_mul(d as Weight)) + (74_573_000 as Weight) + .saturating_add((3_768_682_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(d as Weight))) } fn seal_random(r: u32, ) -> Weight { - (236_391_000 as Weight) - .saturating_add((913_452_000 as Weight).saturating_mul(r as Weight)) + (140_286_000 as Weight) + .saturating_add((950_890_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - } fn seal_deposit_event(r: u32, ) -> Weight { - (140_845_000 as Weight) - .saturating_add((1_322_796_000 as Weight).saturating_mul(r as Weight)) + (167_735_000 as Weight) + .saturating_add((1_375_429_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (1_651_556_000 as Weight) - .saturating_add((737_421_000 as Weight).saturating_mul(t as Weight)) - .saturating_add((244_183_000 as Weight).saturating_mul(n as Weight)) + (1_715_857_000 as Weight) + .saturating_add((760_777_000 as Weight).saturating_mul(t as Weight)) + .saturating_add((241_853_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(t as Weight))) } fn seal_set_rent_allowance(r: u32, ) -> Weight { - (151_091_000 as Weight) - .saturating_add((983_375_000 as Weight).saturating_mul(r as Weight)) + (156_911_000 as Weight) + .saturating_add((1_006_139_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn seal_set_storage(r: u32, ) -> Weight { - (460_478_000 as Weight) - .saturating_add((14_824_033_000 as Weight).saturating_mul(r as Weight)) + (0 as Weight) + .saturating_add((14_938_793_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (2_255_458_000 as Weight) - .saturating_add((204_470_000 as Weight).saturating_mul(n as Weight)) + (2_300_169_000 as Weight) + .saturating_add((204_543_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn seal_clear_storage(r: u32, ) -> Weight { (0 as Weight) - .saturating_add((5_052_125_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((5_140_241_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } 
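// Illustrative sketch (not part of the generated file): every `WeightInfo`
// entry in this file has the same shape: a measured base weight, a slope per
// benchmark component, plus DB read/write costs where storage is touched.
// Evaluating the new `instantiate(n, s)` entry with plain integers; the
// per-read and per-write figures below are the assumed RocksDB defaults of
// this era, not something this diff states.
fn instantiate_weight_sketch(n: u64, s: u64) -> u64 {
    const BASE: u64 = 195_276_000; // measured base weight
    const PER_N: u64 = 35_000; // slope for the `n` component
    const PER_S: u64 = 2_244_000; // slope for the new `s` (presumably salt) component
    const DB_READ: u64 = 25_000_000; // assumed cost of one DB read
    const DB_WRITE: u64 = 100_000_000; // assumed cost of one DB write
    BASE.saturating_add(PER_N.saturating_mul(n))
        .saturating_add(PER_S.saturating_mul(s))
        .saturating_add(DB_READ.saturating_mul(6)) // 6 reads
        .saturating_add(DB_WRITE.saturating_mul(3)) // 3 writes
}

fn main() {
    println!("instantiate(32, 4) ~= {} weight units", instantiate_weight_sketch(32, 4));
}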
fn seal_get_storage(r: u32, ) -> Weight { - (95_473_000 as Weight) - .saturating_add((1_044_784_000 as Weight).saturating_mul(r as Weight)) + (45_212_000 as Weight) + .saturating_add((1_131_504_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - } fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (860_080_000 as Weight) - .saturating_add((146_913_000 as Weight).saturating_mul(n as Weight)) + (885_531_000 as Weight) + .saturating_add((148_986_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - } fn seal_transfer(r: u32, ) -> Weight { - (107_119_000 as Weight) - .saturating_add((5_993_434_000 as Weight).saturating_mul(r as Weight)) + (92_276_000 as Weight) + .saturating_add((6_216_852_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -930,338 +816,277 @@ impl WeightInfo for () { } fn seal_call(r: u32, ) -> Weight { (0 as Weight) - .saturating_add((10_533_320_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((10_734_719_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - } fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { - (9_839_633_000 as Weight) - .saturating_add((5_580_035_000 as Weight).saturating_mul(t as Weight)) - .saturating_add((53_716_000 as Weight).saturating_mul(i as Weight)) - .saturating_add((73_668_000 as Weight).saturating_mul(o as Weight)) + (12_735_614_000 as Weight) + .saturating_add((2_870_730_000 as Weight).saturating_mul(t as Weight)) + .saturating_add((52_569_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((73_956_000 as Weight).saturating_mul(o as Weight)) .saturating_add(RocksDbWeight::get().reads(105 as Weight)) .saturating_add(RocksDbWeight::get().reads((101 as Weight).saturating_mul(t as Weight))) .saturating_add(RocksDbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) } fn seal_instantiate(r: u32, ) -> Weight { (0 as Weight) - .saturating_add((21_856_497_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((22_365_908_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((200 as Weight).saturating_mul(r as Weight))) } - fn seal_instantiate_per_input_output_kb(i: u32, o: u32, ) -> Weight { - (18_796_671_000 as Weight) - .saturating_add((156_269_000 as Weight).saturating_mul(i as Weight)) - .saturating_add((74_645_000 as Weight).saturating_mul(o as Weight)) + fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight { + (18_899_296_000 as Weight) + .saturating_add((53_289_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((76_026_000 as Weight).saturating_mul(o as Weight)) + .saturating_add((281_097_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(207 as Weight)) .saturating_add(RocksDbWeight::get().writes(202 as Weight)) - } fn seal_hash_sha2_256(r: u32, ) 
-> Weight { - (132_190_000 as Weight) - .saturating_add((319_943_000 as Weight).saturating_mul(r as Weight)) + (136_601_000 as Weight) + .saturating_add((323_373_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (747_208_000 as Weight) - .saturating_add((421_808_000 as Weight).saturating_mul(n as Weight)) + (777_563_000 as Weight) + .saturating_add((423_353_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_hash_keccak_256(r: u32, ) -> Weight { - (139_235_000 as Weight) - .saturating_add((333_792_000 as Weight).saturating_mul(r as Weight)) + (136_771_000 as Weight) + .saturating_add((337_881_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (543_256_000 as Weight) - .saturating_add((334_383_000 as Weight).saturating_mul(n as Weight)) + (337_906_000 as Weight) + .saturating_add((336_778_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_hash_blake2_256(r: u32, ) -> Weight { - (142_704_000 as Weight) - .saturating_add((305_513_000 as Weight).saturating_mul(r as Weight)) + (131_040_000 as Weight) + .saturating_add((312_992_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (592_813_000 as Weight) - .saturating_add((151_270_000 as Weight).saturating_mul(n as Weight)) + (693_415_000 as Weight) + .saturating_add((152_745_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_hash_blake2_128(r: u32, ) -> Weight { - (139_921_000 as Weight) - .saturating_add((304_746_000 as Weight).saturating_mul(r as Weight)) + (135_654_000 as Weight) + .saturating_add((311_271_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (544_524_000 as Weight) - .saturating_add((151_549_000 as Weight).saturating_mul(n as Weight)) + (839_521_000 as Weight) + .saturating_add((153_146_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - } fn instr_i64const(r: u32, ) -> Weight { - (24_652_000 as Weight) - .saturating_add((3_306_000 as Weight).saturating_mul(r as Weight)) - + (26_679_000 as Weight) + .saturating_add((3_155_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64load(r: u32, ) -> Weight { - (27_131_000 as Weight) - .saturating_add((162_220_000 as Weight).saturating_mul(r as Weight)) - + (28_920_000 as Weight) + .saturating_add((159_343_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64store(r: u32, ) -> Weight { - (27_086_000 as Weight) - .saturating_add((230_977_000 as Weight).saturating_mul(r as Weight)) - + (28_928_000 as Weight) + .saturating_add((227_286_000 as Weight).saturating_mul(r as Weight)) } fn instr_select(r: u32, ) -> Weight { - (24_656_000 as Weight) - .saturating_add((12_570_000 as Weight).saturating_mul(r as Weight)) - + (26_591_000 as Weight) + .saturating_add((12_591_000 as Weight).saturating_mul(r as Weight)) } fn instr_if(r: u32, ) -> Weight { - (24_643_000 as Weight) - .saturating_add((12_442_000 as Weight).saturating_mul(r as Weight)) - + (26_597_000 as Weight) + .saturating_add((12_258_000 as Weight).saturating_mul(r as 
Weight)) } fn instr_br(r: u32, ) -> Weight { - (24_589_000 as Weight) - .saturating_add((6_237_000 as Weight).saturating_mul(r as Weight)) - + (26_586_000 as Weight) + .saturating_add((5_811_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_if(r: u32, ) -> Weight { - (24_650_000 as Weight) - .saturating_add((14_393_000 as Weight).saturating_mul(r as Weight)) - + (26_581_000 as Weight) + .saturating_add((14_058_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table(r: u32, ) -> Weight { - (24_689_000 as Weight) - .saturating_add((15_706_000 as Weight).saturating_mul(r as Weight)) - + (26_615_000 as Weight) + .saturating_add((15_687_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table_per_entry(e: u32, ) -> Weight { - (40_129_000 as Weight) - .saturating_add((83_000 as Weight).saturating_mul(e as Weight)) - + (40_963_000 as Weight) + .saturating_add((92_000 as Weight).saturating_mul(e as Weight)) } fn instr_call(r: u32, ) -> Weight { - (24_904_000 as Weight) - .saturating_add((96_429_000 as Weight).saturating_mul(r as Weight)) - + (26_880_000 as Weight) + .saturating_add((97_523_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect(r: u32, ) -> Weight { - (32_540_000 as Weight) - .saturating_add((201_773_000 as Weight).saturating_mul(r as Weight)) - + (34_628_000 as Weight) + .saturating_add((201_913_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect_per_param(p: u32, ) -> Weight { - (248_700_000 as Weight) - .saturating_add((3_705_000 as Weight).saturating_mul(p as Weight)) - + (255_763_000 as Weight) + .saturating_add((3_612_000 as Weight).saturating_mul(p as Weight)) } fn instr_local_get(r: u32, ) -> Weight { - (42_081_000 as Weight) - .saturating_add((3_548_000 as Weight).saturating_mul(r as Weight)) - + (45_954_000 as Weight) + .saturating_add((3_439_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_set(r: u32, ) -> Weight { - (42_128_000 as Weight) - .saturating_add((3_678_000 as Weight).saturating_mul(r as Weight)) - + (45_952_000 as Weight) + .saturating_add((3_601_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_tee(r: u32, ) -> Weight { - (42_073_000 as Weight) - .saturating_add((5_212_000 as Weight).saturating_mul(r as Weight)) - + (45_883_000 as Weight) + .saturating_add((5_203_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_get(r: u32, ) -> Weight { - (28_182_000 as Weight) - .saturating_add((8_180_000 as Weight).saturating_mul(r as Weight)) - + (29_895_000 as Weight) + .saturating_add((8_221_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_set(r: u32, ) -> Weight { - (28_060_000 as Weight) - .saturating_add((12_081_000 as Weight).saturating_mul(r as Weight)) - + (29_916_000 as Weight) + .saturating_add((12_036_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_current(r: u32, ) -> Weight { - (27_113_000 as Weight) - .saturating_add((3_802_000 as Weight).saturating_mul(r as Weight)) - + (28_878_000 as Weight) + .saturating_add((3_794_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_grow(r: u32, ) -> Weight { - (25_521_000 as Weight) - .saturating_add((2_288_295_000 as Weight).saturating_mul(r as Weight)) - + (27_351_000 as Weight) + .saturating_add((2_302_301_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64clz(r: u32, ) -> Weight { - (24_662_000 as Weight) - .saturating_add((5_497_000 as Weight).saturating_mul(r as Weight)) - + (26_535_000 as Weight) + .saturating_add((5_450_000 as Weight).saturating_mul(r as Weight)) } fn 
instr_i64ctz(r: u32, ) -> Weight { - (24_647_000 as Weight) - .saturating_add((5_556_000 as Weight).saturating_mul(r as Weight)) - + (26_489_000 as Weight) + .saturating_add((5_410_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64popcnt(r: u32, ) -> Weight { - (24_646_000 as Weight) - .saturating_add((6_138_000 as Weight).saturating_mul(r as Weight)) - + (26_576_000 as Weight) + .saturating_add((5_976_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eqz(r: u32, ) -> Weight { - (24_649_000 as Weight) - .saturating_add((5_477_000 as Weight).saturating_mul(r as Weight)) - + (26_521_000 as Weight) + .saturating_add((5_465_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendsi32(r: u32, ) -> Weight { - (24_655_000 as Weight) - .saturating_add((5_414_000 as Weight).saturating_mul(r as Weight)) - + (26_534_000 as Weight) + .saturating_add((5_375_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendui32(r: u32, ) -> Weight { - (24_619_000 as Weight) - .saturating_add((5_434_000 as Weight).saturating_mul(r as Weight)) - + (26_560_000 as Weight) + .saturating_add((5_284_000 as Weight).saturating_mul(r as Weight)) } fn instr_i32wrapi64(r: u32, ) -> Weight { - (24_654_000 as Weight) - .saturating_add((5_483_000 as Weight).saturating_mul(r as Weight)) - + (26_554_000 as Weight) + .saturating_add((5_358_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eq(r: u32, ) -> Weight { - (24_690_000 as Weight) - .saturating_add((7_485_000 as Weight).saturating_mul(r as Weight)) - + (26_549_000 as Weight) + .saturating_add((7_402_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ne(r: u32, ) -> Weight { - (24_652_000 as Weight) - .saturating_add((7_468_000 as Weight).saturating_mul(r as Weight)) - + (26_582_000 as Weight) + .saturating_add((7_266_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64lts(r: u32, ) -> Weight { - (24_667_000 as Weight) - .saturating_add((7_426_000 as Weight).saturating_mul(r as Weight)) - + (26_558_000 as Weight) + .saturating_add((7_293_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ltu(r: u32, ) -> Weight { - (24_693_000 as Weight) - .saturating_add((7_393_000 as Weight).saturating_mul(r as Weight)) - + (26_569_000 as Weight) + .saturating_add((7_278_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gts(r: u32, ) -> Weight { - (24_675_000 as Weight) - .saturating_add((7_407_000 as Weight).saturating_mul(r as Weight)) - + (26_516_000 as Weight) + .saturating_add((7_334_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gtu(r: u32, ) -> Weight { - (24_697_000 as Weight) - .saturating_add((7_392_000 as Weight).saturating_mul(r as Weight)) - + (26_561_000 as Weight) + .saturating_add((7_283_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64les(r: u32, ) -> Weight { - (24_646_000 as Weight) - .saturating_add((7_420_000 as Weight).saturating_mul(r as Weight)) - + (26_589_000 as Weight) + .saturating_add((7_244_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64leu(r: u32, ) -> Weight { - (24_683_000 as Weight) - .saturating_add((7_404_000 as Weight).saturating_mul(r as Weight)) - + (26_593_000 as Weight) + .saturating_add((7_318_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ges(r: u32, ) -> Weight { - (24_685_000 as Weight) - .saturating_add((7_461_000 as Weight).saturating_mul(r as Weight)) - + (26_626_000 as Weight) + .saturating_add((7_348_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64geu(r: u32, ) -> Weight { - (25_147_000 as Weight) - 
.saturating_add((7_003_000 as Weight).saturating_mul(r as Weight)) - + (26_595_000 as Weight) + .saturating_add((7_330_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64add(r: u32, ) -> Weight { - (24_705_000 as Weight) - .saturating_add((7_483_000 as Weight).saturating_mul(r as Weight)) - + (26_568_000 as Weight) + .saturating_add((8_657_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64sub(r: u32, ) -> Weight { - (24_675_000 as Weight) - .saturating_add((7_377_000 as Weight).saturating_mul(r as Weight)) - + (27_393_000 as Weight) + .saturating_add((6_743_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64mul(r: u32, ) -> Weight { - (24_680_000 as Weight) - .saturating_add((7_376_000 as Weight).saturating_mul(r as Weight)) - + (26_571_000 as Weight) + .saturating_add((7_329_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divs(r: u32, ) -> Weight { - (24_660_000 as Weight) - .saturating_add((13_091_000 as Weight).saturating_mul(r as Weight)) - + (26_585_000 as Weight) + .saturating_add((12_977_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divu(r: u32, ) -> Weight { - (24_643_000 as Weight) - .saturating_add((12_109_000 as Weight).saturating_mul(r as Weight)) - + (26_554_000 as Weight) + .saturating_add((11_955_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rems(r: u32, ) -> Weight { - (24_615_000 as Weight) - .saturating_add((13_049_000 as Weight).saturating_mul(r as Weight)) - + (26_570_000 as Weight) + .saturating_add((12_903_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64remu(r: u32, ) -> Weight { - (24_696_000 as Weight) - .saturating_add((12_039_000 as Weight).saturating_mul(r as Weight)) - + (26_561_000 as Weight) + .saturating_add((12_112_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64and(r: u32, ) -> Weight { - (24_683_000 as Weight) - .saturating_add((7_314_000 as Weight).saturating_mul(r as Weight)) - + (26_587_000 as Weight) + .saturating_add((7_411_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64or(r: u32, ) -> Weight { - (24_657_000 as Weight) - .saturating_add((7_401_000 as Weight).saturating_mul(r as Weight)) - + (26_588_000 as Weight) + .saturating_add((7_479_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64xor(r: u32, ) -> Weight { - (24_661_000 as Weight) - .saturating_add((7_347_000 as Weight).saturating_mul(r as Weight)) - + (26_541_000 as Weight) + .saturating_add((7_386_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shl(r: u32, ) -> Weight { - (24_644_000 as Weight) - .saturating_add((7_389_000 as Weight).saturating_mul(r as Weight)) - + (26_562_000 as Weight) + .saturating_add((7_263_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shrs(r: u32, ) -> Weight { - (24_643_000 as Weight) - .saturating_add((7_416_000 as Weight).saturating_mul(r as Weight)) - + (26_569_000 as Weight) + .saturating_add((7_353_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shru(r: u32, ) -> Weight { - (24_634_000 as Weight) - .saturating_add((7_392_000 as Weight).saturating_mul(r as Weight)) - + (26_533_000 as Weight) + .saturating_add((7_342_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotl(r: u32, ) -> Weight { - (24_618_000 as Weight) - .saturating_add((7_452_000 as Weight).saturating_mul(r as Weight)) - + (26_545_000 as Weight) + .saturating_add((7_362_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotr(r: u32, ) -> Weight { - (24_618_000 as Weight) - .saturating_add((7_447_000 as Weight).saturating_mul(r as Weight)) - + (26_535_000 as 
Weight) + .saturating_add((7_330_000 as Weight).saturating_mul(r as Weight)) } - } diff --git a/frame/democracy/README.md b/frame/democracy/README.md index ffbf2f36a176006a9308cd9fe295da0a89fff62a..6a390cc048e1cdddfc0eeb3702dda22ad5155b9a 100644 --- a/frame/democracy/README.md +++ b/frame/democracy/README.md @@ -132,4 +132,4 @@ This call can only be made by the `VetoOrigin`. - `cancel_queued` - Cancels a proposal that is queued for enactment. - `clear_public_proposal` - Removes all public proposals. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index b5de1a91c17ad9abc7e46b3ebaf3b0d77f666de7..542bfaa79db119cd08e52948c953a7e6c4dd0674 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -34,21 +34,21 @@ const MAX_REFERENDUMS: u32 = 99; const MAX_SECONDERS: u32 = 100; const MAX_BYTES: u32 = 16_384; -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::Event) { let events = System::::events(); - let system_event: ::Event = generic_event.into(); + let system_event: ::Event = generic_event.into(); // compare to the last event record let EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); } -fn funded_account(name: &'static str, index: u32) -> T::AccountId { +fn funded_account(name: &'static str, index: u32) -> T::AccountId { let caller: T::AccountId = account(name, index, SEED); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); caller } -fn add_proposal(n: u32) -> Result { +fn add_proposal(n: u32) -> Result { let other = funded_account::("proposer", n); let value = T::MinimumDeposit::get(); let proposal_hash: T::Hash = T::Hashing::hash_of(&n); @@ -62,7 +62,7 @@ fn add_proposal(n: u32) -> Result { Ok(proposal_hash) } -fn add_referendum(n: u32) -> Result { +fn add_referendum(n: u32) -> Result { let proposal_hash: T::Hash = T::Hashing::hash_of(&n); let vote_threshold = VoteThreshold::SimpleMajority; @@ -84,7 +84,7 @@ fn add_referendum(n: u32) -> Result { Ok(referendum_index) } -fn account_vote(b: BalanceOf) -> AccountVote> { +fn account_vote(b: BalanceOf) -> AccountVote> { let v = Vote { aye: true, conviction: Conviction::Locked1x, diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index fa8d07fd78db795a7eda902c037725d0cd31712d..70383beaa0655e2c10c13a37e0cfad112dc8802e 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -17,7 +17,7 @@ //! # Democracy Pallet //! -//! - [`democracy::Trait`](./trait.Trait.html) +//! - [`democracy::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -199,13 +199,13 @@ pub type PropIndex = u32; /// A referendum index. pub type ReferendumIndex = u32; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; -pub trait Trait: frame_system::Trait + Sized { +pub trait Config: frame_system::Config + Sized { type Proposal: Parameter + Dispatchable + From>; - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// Currency type for this module. type Currency: ReservableCurrency @@ -338,7 +338,7 @@ enum Releases { } decl_storage! 
{ - trait Store for Module as Democracy { + trait Store for Module as Democracy { // TODO: Refactor public proposal queue into its own pallet. // https://github.com/paritytech/substrate/issues/5322 /// The number of (public) proposals that have been made so far. @@ -413,9 +413,9 @@ decl_storage! { decl_event! { pub enum Event where Balance = BalanceOf, - ::AccountId, - ::Hash, - ::BlockNumber, + ::AccountId, + ::Hash, + ::BlockNumber, { /// A motion has been proposed by a public account. \[proposal_index, deposit\] Proposed(PropIndex, Balance), @@ -461,7 +461,7 @@ decl_event! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Value too low ValueLow, /// Proposal does not exist @@ -537,7 +537,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; /// The minimum period of locking and the period between a proposal being approved and enacted. @@ -1086,7 +1086,7 @@ decl_module! { } /// Enact a proposal from a referendum. For now we just make the weight be the maximum. - #[weight = T::MaximumBlockWeight::get()] + #[weight = T::BlockWeights::get().max_block] fn enact_proposal(origin, proposal_hash: T::Hash, index: ReferendumIndex) -> DispatchResult { ensure_root(origin)?; Self::do_enact_proposal(proposal_hash, index) @@ -1168,7 +1168,7 @@ decl_module! { } } -impl Module { +impl Module { // exposed immutables. /// Get the amount locked in support of `proposal`; `None` if proposal isn't a valid proposal @@ -1609,6 +1609,7 @@ impl Module { /// - Db reads per R: `DepositOf`, `ReferendumInfoOf` /// # fn begin_block(now: T::BlockNumber) -> Result { + let max_block_weight = T::BlockWeights::get().max_block; let mut weight = 0; // pick out another public referendum if it's time. @@ -1616,7 +1617,7 @@ impl Module { // Errors come from the queue being empty. we don't really care about that, and even if // we did, there is nothing we can do here. let _ = Self::launch_next(now); - weight = T::MaximumBlockWeight::get(); + weight = max_block_weight; } let next = Self::lowest_unbaked(); @@ -1627,7 +1628,7 @@ impl Module { for (index, info) in Self::maturing_referenda_at_inner(now, next..last).into_iter() { let approved = Self::bake_referendum(now, index, info)?; ReferendumInfoOf::::insert(index, ReferendumInfo::Finished { end: now, approved }); - weight = T::MaximumBlockWeight::get(); + weight = max_block_weight; } Ok(weight) diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index 25209901109fae484c275bc546215b730d69d972..dae3a262209ea1e6c4e39b34920bac96c47e4ecb 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -88,12 +88,14 @@ impl Filter for BaseFilter { pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1_000_000; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1_000_000); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -105,13 +107,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -120,9 +115,9 @@ impl frame_system::Trait for Test { type SystemWeightInfo = (); } parameter_types! { - pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * MaximumBlockWeight::get(); + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; } -impl pallet_scheduler::Trait for Test { +impl pallet_scheduler::Config for Test { type Event = Event; type Origin = Origin; type PalletsOrigin = OriginCaller; @@ -135,7 +130,7 @@ impl pallet_scheduler::Trait for Test { parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type Event = Event; @@ -173,7 +168,7 @@ impl Contains for OneToFive { fn add(_m: &u64) {} } -impl super::Trait for Test { +impl super::Config for Test { type Proposal = Call; type Event = Event; type Currency = pallet_balances::Module; @@ -242,7 +237,7 @@ fn set_balance_proposal(value: u64) -> Vec { fn set_balance_proposal_is_correctly_filtered_out() { for i in 0..10 { let call = Call::decode(&mut &set_balance_proposal(i)[..]).unwrap(); - assert!(!::BaseCallFilter::filter(&call)); + assert!(!::BaseCallFilter::filter(&call)); } } diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs index e386e5fb55313780ea566fef6605769355be0d01..06899b47dea714c7662fab8069e809eeca4fc09e 100644 --- a/frame/democracy/src/weights.rs +++ b/frame/democracy/src/weights.rs @@ -72,7 +72,7 @@ pub trait WeightInfo { /// Weights for pallet_democracy using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn propose() -> Weight { (87_883_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) diff --git a/frame/elections-phragmen/README.md b/frame/elections-phragmen/README.md index 5507d539706328853238de2e86e6ca567adf6dc7..8c5940ea2d78ea3cc648ec68fe1fecf8ecfad858 100644 --- a/frame/elections-phragmen/README.md +++ b/frame/elections-phragmen/README.md @@ -64,4 +64,4 @@ being re-elected at the end of each round. 
- [`Call`](https://docs.rs/pallet-elections-phragmen/latest/pallet_elections_phragmen/enum.Call.html) - [`Module`](https://docs.rs/pallet-elections-phragmen/latest/pallet_elections_phragmen/struct.Module.html) -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index e7c3719480b707896e538292e840f47cc730542e..eaa5bbe9ed4fb15ace9317782222ed9e81a222c4 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -30,7 +30,7 @@ const BALANCE_FACTOR: u32 = 250; const MAX_VOTERS: u32 = 500; const MAX_CANDIDATES: u32 = 200; -type Lookup = <::Lookup as StaticLookup>::Source; +type Lookup = <::Lookup as StaticLookup>::Source; macro_rules! whitelist { ($acc:ident) => { @@ -41,7 +41,7 @@ macro_rules! whitelist { } /// grab new account with infinite balance. -fn endowed_account(name: &'static str, index: u32) -> T::AccountId { +fn endowed_account(name: &'static str, index: u32) -> T::AccountId { let account: T::AccountId = account(name, index, 0); let amount = default_stake::(BALANCE_FACTOR); let _ = T::Currency::make_free_balance_be(&account, amount); @@ -53,28 +53,28 @@ fn endowed_account(name: &'static str, index: u32) -> T::AccountId { } /// Account to lookup type of system trait. -fn as_lookup(account: T::AccountId) -> Lookup { +fn as_lookup(account: T::AccountId) -> Lookup { T::Lookup::unlookup(account) } /// Get a reasonable amount of stake based on the execution trait's configuration -fn default_stake(factor: u32) -> BalanceOf { +fn default_stake(factor: u32) -> BalanceOf { let factor = BalanceOf::::from(factor); T::Currency::minimum_balance() * factor } /// Get the current number of candidates. -fn candidate_count() -> u32 { +fn candidate_count() -> u32 { >::decode_len().unwrap_or(0usize) as u32 } /// Get the number of votes of a voter. -fn vote_count_of(who: &T::AccountId) -> u32 { +fn vote_count_of(who: &T::AccountId) -> u32 { >::get(who).1.len() as u32 } /// A `DefunctVoter` struct with correct value -fn defunct_for(who: T::AccountId) -> DefunctVoter> { +fn defunct_for(who: T::AccountId) -> DefunctVoter> { DefunctVoter { who: as_lookup::(who.clone()), candidate_count: candidate_count::(), @@ -83,7 +83,7 @@ fn defunct_for(who: T::AccountId) -> DefunctVoter> { } /// Add `c` new candidates. -fn submit_candidates(c: u32, prefix: &'static str) +fn submit_candidates(c: u32, prefix: &'static str) -> Result, &'static str> { (0..c).map(|i| { @@ -97,7 +97,7 @@ fn submit_candidates(c: u32, prefix: &'static str) } /// Add `c` new candidates with self vote. -fn submit_candidates_with_self_vote(c: u32, prefix: &'static str) +fn submit_candidates_with_self_vote(c: u32, prefix: &'static str) -> Result, &'static str> { let candidates = submit_candidates::(c, prefix)?; @@ -110,7 +110,7 @@ fn submit_candidates_with_self_vote(c: u32, prefix: &'static str) /// Submit one voter. -fn submit_voter(caller: T::AccountId, votes: Vec, stake: BalanceOf) +fn submit_voter(caller: T::AccountId, votes: Vec, stake: BalanceOf) -> Result<(), sp_runtime::DispatchError> { >::vote(RawOrigin::Signed(caller).into(), votes, stake) @@ -118,7 +118,7 @@ fn submit_voter(caller: T::AccountId, votes: Vec, stake: /// create `num_voter` voters who randomly vote for at most `votes` of `all_candidates` if /// available. 
-fn distribute_voters(mut all_candidates: Vec, num_voters: u32, votes: usize) +fn distribute_voters(mut all_candidates: Vec, num_voters: u32, votes: usize) -> Result<(), &'static str> { let stake = default_stake::(BALANCE_FACTOR); @@ -138,7 +138,7 @@ fn distribute_voters(mut all_candidates: Vec, num_voters /// Fill the seats of members and runners-up up until `m`. Note that this might include either only /// members, or members and runners-up. -fn fill_seats_up_to(m: u32) -> Result, &'static str> { +fn fill_seats_up_to(m: u32) -> Result, &'static str> { let _ = submit_candidates_with_self_vote::(m, "fill_seats_up_to")?; assert_eq!(>::candidates().len() as u32, m, "wrong number of candidates."); >::do_phragmen(); @@ -158,7 +158,7 @@ fn fill_seats_up_to(m: u32) -> Result, &'static str> } /// removes all the storage items to reverse any genesis state. -fn clean() { +fn clean() { >::kill(); >::kill(); >::kill(); diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 8279f9cf11f165d7bc9b00d08d3c39be06e03b82..db2428971cc5cd0f7c9f12369221996306b8be5e 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -77,7 +77,7 @@ //! //! ### Module Information //! -//! - [`election_sp_phragmen::Trait`](./trait.Trait.html) +//! - [`election_sp_phragmen::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) @@ -112,9 +112,9 @@ pub use weights::WeightInfo; pub const MAXIMUM_VOTE: usize = 16; type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; /// An indication that the renouncing account currently has which of the below roles. #[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug)] @@ -140,9 +140,9 @@ pub struct DefunctVoter { pub candidate_count: u32 } -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type.c - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// Identifier for the elections-phragmen pallet's lock type ModuleId: Get; @@ -193,7 +193,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as PhragmenElection { + trait Store for Module as PhragmenElection { // ---- State /// The current elected membership. Sorted based on account id. pub Members get(fn members): Vec<(T::AccountId, BalanceOf)>; @@ -251,7 +251,7 @@ decl_storage! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Cannot vote when no candidates or members exist. UnableToVote, /// Must vote for at least one candidate. @@ -290,7 +290,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; @@ -621,7 +621,7 @@ decl_module! { #[weight = if *has_replacement { T::WeightInfo::remove_member_with_replacement() } else { - T::MaximumBlockWeight::get() + T::BlockWeights::get().max_block }] fn remove_member( origin, @@ -667,7 +667,7 @@ decl_module! { decl_event!( pub enum Event where Balance = BalanceOf, - ::AccountId, + ::AccountId, { /// A new term with \[new_members\]. This indicates that enough candidates existed to run the /// election, not that enough have has been elected. 
The inner value must be examined for @@ -694,7 +694,7 @@ decl_event!( } ); -impl Module { +impl Module { /// Attempts to remove a member `who`. If a runner-up exists, it is used as the replacement and /// Ok(true). is returned. /// @@ -829,7 +829,7 @@ impl Module { if !Self::term_duration().is_zero() { if (block_number % Self::term_duration()).is_zero() { Self::do_phragmen(); - return T::MaximumBlockWeight::get() + return T::BlockWeights::get().max_block; } } 0 @@ -1027,7 +1027,7 @@ impl Module { } } -impl Contains for Module { +impl Contains for Module { fn contains(who: &T::AccountId) -> bool { Self::is_member(who) } @@ -1046,7 +1046,7 @@ impl Contains for Module { } } -impl ContainsLengthBound for Module { +impl ContainsLengthBound for Module { fn min_len() -> usize { 0 } /// Implementation uses a parameter type so calling is cost-free. @@ -1058,26 +1058,26 @@ impl ContainsLengthBound for Module { #[cfg(test)] mod tests { use super::*; - use frame_support::{assert_ok, assert_noop, assert_err_with_weight, parameter_types, - weights::Weight, - }; + use frame_support::{assert_ok, assert_noop, assert_err_with_weight, parameter_types}; use substrate_test_utils::assert_eq_uvec; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, BuildStorage, DispatchResult, + testing::Header, BuildStorage, DispatchResult, traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, }; use crate as elections_phragmen; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -1089,13 +1089,6 @@ mod tests { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -1108,7 +1101,7 @@ mod tests { pub const ExistentialDeposit: u64 = 1; } - impl pallet_balances::Trait for Test { + impl pallet_balances::Config for Test { type Balance = u64; type Event = Event; type DustRemoval = (); @@ -1175,7 +1168,7 @@ mod tests { pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect"; } - impl Trait for Test { + impl Config for Test { type ModuleId = ElectionsPhragmenModuleId; type Event = Event; type Currency = Balances; diff --git a/frame/elections-phragmen/src/weights.rs b/frame/elections-phragmen/src/weights.rs index 2702aec0a01cb147e73b63698b53806d1e66b7e7..48fd40e782e4f33f3621b8caeb2ec1d6cc7cef9b 100644 --- a/frame/elections-phragmen/src/weights.rs +++ b/frame/elections-phragmen/src/weights.rs @@ -59,7 +59,7 @@ pub trait WeightInfo { /// Weights for pallet_elections_phragmen using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn vote(v: u32, ) -> Weight { (89_627_000 as Weight) .saturating_add((197_000 as Weight).saturating_mul(v as Weight)) diff --git a/frame/elections/src/lib.rs b/frame/elections/src/lib.rs index dccc42f24417b7dd25a5dba35adb8043c9ceeed4..1490b6d86aeb49ae863225c575b88c45cd546f31 100644 --- a/frame/elections/src/lib.rs +++ b/frame/elections/src/lib.rs @@ -139,9 +139,9 @@ pub const VOTER_SET_SIZE: usize = 64; /// NUmber of approvals grouped in one chunk. pub const APPROVAL_SET_SIZE: usize = 8; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; /// Index used to access chunks. type SetIndex = u32; @@ -152,8 +152,8 @@ type ApprovalFlag = u32; /// Number of approval flags that can fit into [`ApprovalFlag`] type. const APPROVAL_FLAG_LEN: usize = 32; -pub trait Trait: frame_system::Trait { - type Event: From> + Into<::Event>; +pub trait Config: frame_system::Config { + type Event: From> + Into<::Event>; /// Identifier for the elections pallet's lock type ModuleId: Get; @@ -218,7 +218,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as Elections { + trait Store for Module as Elections { // ---- parameters /// How long to give each top candidate to present themselves after the vote ends. @@ -286,7 +286,7 @@ decl_storage! { decl_error! { /// Error for the elections module. - pub enum Error for Module { + pub enum Error for Module { /// Reporter must be a voter. NotVoter, /// Target for inactivity cleanup must be active. @@ -345,7 +345,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; /// How much should be locked up in order to submit one's candidacy. A reasonable @@ -706,7 +706,7 @@ decl_module! { } decl_event!( - pub enum Event where ::AccountId { + pub enum Event where ::AccountId { /// Reaped \[voter, reaper\]. VoterReaped(AccountId, AccountId), /// Slashed \[reaper\]. @@ -719,7 +719,7 @@ decl_event!( } ); -impl Module { +impl Module { // exposed immutables. /// True if we're currently in a presentation period. diff --git a/frame/elections/src/mock.rs b/frame/elections/src/mock.rs index 0d57089af5ef81f02634f5b99f17fd5aed7cc694..482c905f89c1421020b1b7ed8d66385719a99779 100644 --- a/frame/elections/src/mock.rs +++ b/frame/elections/src/mock.rs @@ -22,23 +22,24 @@ use frame_support::{ StorageValue, StorageMap, parameter_types, assert_ok, traits::{ChangeMembers, Currency, LockIdentifier}, - weights::Weight, }; use sp_core::H256; use sp_runtime::{ - Perbill, BuildStorage, testing::Header, traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, + BuildStorage, testing::Header, traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, }; use crate as elections; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = Call; type Index = u64; @@ -50,13 +51,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -68,7 +62,7 @@ impl frame_system::Trait for Test { parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type DustRemoval = (); @@ -110,7 +104,7 @@ parameter_types!{ pub const ElectionModuleId: LockIdentifier = *b"py/elect"; } -impl elections::Trait for Test { +impl elections::Config for Test { type Event = Event; type Currency = Balances; type BadPresentation = (); diff --git a/frame/elections/src/tests.rs b/frame/elections/src/tests.rs index d3579ca337436b5ba2dab36840c9f924c36a82ee..38a16953572f452c558e85b986d32db78758988f 100644 --- a/frame/elections/src/tests.rs +++ b/frame/elections/src/tests.rs @@ -680,8 +680,8 @@ fn retracting_active_voter_should_slash_reporter() { assert_ok!(Elections::end_block(System::block_number())); assert_eq!(Elections::vote_index(), 2); - assert_eq!(::InactiveGracePeriod::get(), 1); - assert_eq!(::VotingPeriod::get(), 4); + assert_eq!(::InactiveGracePeriod::get(), 1); + assert_eq!(::VotingPeriod::get(), 4); assert_eq!(Elections::voter_info(4), Some(VoterInfo { last_win: 1, last_active: 0, stake: 40, pot: 0 })); assert_ok!(Elections::reap_inactive_voter(Origin::signed(4), diff --git a/frame/evm/README.md b/frame/evm/README.md deleted file mode 100644 index 499a0761cfa9d6ca2b144f9ab85c67d1cf25226a..0000000000000000000000000000000000000000 --- a/frame/evm/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# EVM Module - -The EVM module allows unmodified EVM code to be executed in a Substrate-based blockchain. -- [`evm::Trait`](https://docs.rs/pallet-evm/2.0.0/pallet_evm/trait.Trait.html) - -## EVM Engine - -The EVM module uses [`SputnikVM`](https://github.com/rust-blockchain/evm) as the underlying EVM engine. The engine is overhauled so that it's [`modular`](https://github.com/corepaper/evm). - -## Execution Lifecycle - -There are a separate set of accounts managed by the EVM module. Substrate based accounts can call the EVM Module to deposit or withdraw balance from the Substrate base-currency into a different balance managed and used by the EVM module. Once a user has populated their balance, they can create and call smart contracts using this module. - -There's one-to-one mapping from Substrate accounts and EVM external accounts that is defined by a conversion function. 
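The "conversion function" referred to here is the pallet's `AddressMapping` trait. The hashed variant defined in the removed `frame/evm/src/lib.rs` further down prefixes the 20-byte EVM address with `b"evm:"` and hashes the resulting 24-byte buffer into an `AccountId32`. A minimal standalone sketch of that mapping, assuming `blake2_256` as the concrete hasher (the removed tests pull in sp_core's `Blake2Hasher`) and a made-up address purely for illustration:

use sp_core::{crypto::AccountId32, H160};
use sp_io::hashing::blake2_256;

/// Sketch of `HashedAddressMapping`: b"evm:" ++ address, hashed to 32 bytes.
fn evm_to_substrate(address: H160) -> AccountId32 {
    let mut data = [0u8; 24];
    data[0..4].copy_from_slice(b"evm:");
    data[4..24].copy_from_slice(address.as_bytes());
    AccountId32::from(blake2_256(&data))
}

fn main() {
    // Hypothetical EVM address, for illustration only.
    let addr = H160::repeat_byte(0x42);
    println!("substrate account: {:?}", evm_to_substrate(addr));
}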
- -## EVM Module vs Ethereum Network - -The EVM module should be able to produce nearly identical results compared to the Ethereum mainnet, including gas cost and balance changes. - -Observable differences include: - -- The available length of block hashes may not be 256 depending on the configuration of the System module in the Substrate runtime. -- Difficulty and coinbase, which do not make sense in this module and is currently hard coded to zero. - -We currently do not aim to make unobservable behaviors, such as state root, to be the same. We also don't aim to follow the exact same transaction / receipt format. However, given one Ethereum transaction and one Substrate account's private key, one should be able to convert any Ethereum transaction into a transaction compatible with this module. - -The gas configurations are configurable. Right now, a pre-defined Istanbul hard fork configuration option is provided. - -License: Apache-2.0 \ No newline at end of file diff --git a/frame/evm/src/backend.rs b/frame/evm/src/backend.rs deleted file mode 100644 index b625c0c548026dc348bcece4cb7b8e000bf43789..0000000000000000000000000000000000000000 --- a/frame/evm/src/backend.rs +++ /dev/null @@ -1,216 +0,0 @@ -use sp_std::marker::PhantomData; -use sp_std::vec::Vec; -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -use codec::{Encode, Decode}; -use sp_core::{U256, H256, H160}; -use sp_runtime::traits::UniqueSaturatedInto; -use frame_support::traits::Get; -use frame_support::{debug, storage::{StorageMap, StorageDoubleMap}}; -use sha3::{Keccak256, Digest}; -use evm::backend::{Backend as BackendT, ApplyBackend, Apply}; -use crate::{Trait, AccountStorages, AccountCodes, Module, Event}; - -#[derive(Clone, Eq, PartialEq, Encode, Decode, Default)] -#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] -/// Ethereum account nonce, balance and code. Used by storage. -pub struct Account { - /// Account nonce. - pub nonce: U256, - /// Account balance. - pub balance: U256, -} - -#[derive(Clone, Eq, PartialEq, Encode, Decode)] -#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] -/// Ethereum log. Used for `deposit_event`. -pub struct Log { - /// Source address of the log. - pub address: H160, - /// Topics of the log. - pub topics: Vec, - /// Byte array data of the log. - pub data: Vec, -} - -#[derive(Clone, Eq, PartialEq, Encode, Decode, Default)] -#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] -/// External input from the transaction. -pub struct Vicinity { - /// Current transaction gas price. - pub gas_price: U256, - /// Origin of the transaction. - pub origin: H160, -} - -/// Substrate backend for EVM. -pub struct Backend<'vicinity, T> { - vicinity: &'vicinity Vicinity, - _marker: PhantomData, -} - -impl<'vicinity, T> Backend<'vicinity, T> { - /// Create a new backend with given vicinity. 
- pub fn new(vicinity: &'vicinity Vicinity) -> Self { - Self { vicinity, _marker: PhantomData } - } -} - -impl<'vicinity, T: Trait> BackendT for Backend<'vicinity, T> { - fn gas_price(&self) -> U256 { self.vicinity.gas_price } - fn origin(&self) -> H160 { self.vicinity.origin } - - fn block_hash(&self, number: U256) -> H256 { - if number > U256::from(u32::max_value()) { - H256::default() - } else { - let number = T::BlockNumber::from(number.as_u32()); - H256::from_slice(frame_system::Module::::block_hash(number).as_ref()) - } - } - - fn block_number(&self) -> U256 { - let number: u128 = frame_system::Module::::block_number().unique_saturated_into(); - U256::from(number) - } - - fn block_coinbase(&self) -> H160 { - H160::default() - } - - fn block_timestamp(&self) -> U256 { - let now: u128 = pallet_timestamp::Module::::get().unique_saturated_into(); - U256::from(now / 1000) - } - - fn block_difficulty(&self) -> U256 { - U256::zero() - } - - fn block_gas_limit(&self) -> U256 { - U256::zero() - } - - fn chain_id(&self) -> U256 { - U256::from(T::ChainId::get()) - } - - fn exists(&self, _address: H160) -> bool { - true - } - - fn basic(&self, address: H160) -> evm::backend::Basic { - let account = Module::::account_basic(&address); - - evm::backend::Basic { - balance: account.balance, - nonce: account.nonce, - } - } - - fn code_size(&self, address: H160) -> usize { - AccountCodes::decode_len(&address).unwrap_or(0) - } - - fn code_hash(&self, address: H160) -> H256 { - H256::from_slice(Keccak256::digest(&AccountCodes::get(&address)).as_slice()) - } - - fn code(&self, address: H160) -> Vec { - AccountCodes::get(&address) - } - - fn storage(&self, address: H160, index: H256) -> H256 { - AccountStorages::get(address, index) - } -} - -impl<'vicinity, T: Trait> ApplyBackend for Backend<'vicinity, T> { - fn apply( - &mut self, - values: A, - logs: L, - delete_empty: bool, - ) where - A: IntoIterator>, - I: IntoIterator, - L: IntoIterator, - { - for apply in values { - match apply { - Apply::Modify { - address, basic, code, storage, reset_storage, - } => { - Module::::mutate_account_basic(&address, Account { - nonce: basic.nonce, - balance: basic.balance, - }); - - if let Some(code) = code { - debug::debug!( - target: "evm", - "Inserting code ({} bytes) at {:?}", - code.len(), - address - ); - AccountCodes::insert(address, code); - } - - if reset_storage { - AccountStorages::remove_prefix(address); - } - - for (index, value) in storage { - if value == H256::default() { - debug::debug!( - target: "evm", - "Removing storage for {:?} [index: {:?}]", - address, - index - ); - AccountStorages::remove(address, index); - } else { - debug::debug!( - target: "evm", - "Updating storage for {:?} [index: {:?}, value: {:?}]", - address, - index, - value - ); - AccountStorages::insert(address, index, value); - } - } - - if delete_empty { - Module::::remove_account_if_empty(&address); - } - }, - Apply::Delete { address } => { - debug::debug!( - target: "evm", - "Deleting account at {:?}", - address - ); - Module::::remove_account(&address) - }, - } - } - - for log in logs { - debug::trace!( - target: "evm", - "Inserting log for {:?}, topics ({}) {:?}, data ({}): {:?}]", - log.address, - log.topics.len(), - log.topics, - log.data.len(), - log.data - ); - Module::::deposit_event(Event::::Log(Log { - address: log.address, - topics: log.topics, - data: log.data, - })); - } - } -} diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs deleted file mode 100644 index 
e7812a55482fd8fbba565ac54e4018a884fb32fe..0000000000000000000000000000000000000000 --- a/frame/evm/src/lib.rs +++ /dev/null @@ -1,678 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # EVM Module -//! -//! The EVM module allows unmodified EVM code to be executed in a Substrate-based blockchain. -//! - [`evm::Trait`] -//! -//! ## EVM Engine -//! -//! The EVM module uses [`SputnikVM`](https://github.com/rust-blockchain/evm) as the underlying EVM engine. -//! The engine is overhauled so that it's [`modular`](https://github.com/corepaper/evm). -//! -//! ## Execution Lifecycle -//! -//! There are a separate set of accounts managed by the EVM module. Substrate based accounts can call the EVM Module -//! to deposit or withdraw balance from the Substrate base-currency into a different balance managed and used by -//! the EVM module. Once a user has populated their balance, they can create and call smart contracts using this module. -//! -//! There's one-to-one mapping from Substrate accounts and EVM external accounts that is defined by a conversion function. -//! -//! ## EVM Module vs Ethereum Network -//! -//! The EVM module should be able to produce nearly identical results compared to the Ethereum mainnet, -//! including gas cost and balance changes. -//! -//! Observable differences include: -//! -//! - The available length of block hashes may not be 256 depending on the configuration of the System module -//! in the Substrate runtime. -//! - Difficulty and coinbase, which do not make sense in this module and is currently hard coded to zero. -//! -//! We currently do not aim to make unobservable behaviors, such as state root, to be the same. We also don't aim to follow -//! the exact same transaction / receipt format. However, given one Ethereum transaction and one Substrate account's -//! private key, one should be able to convert any Ethereum transaction into a transaction compatible with this module. -//! -//! The gas configurations are configurable. Right now, a pre-defined Istanbul hard fork configuration option is provided. - -// Ensure we're `no_std` when compiling for Wasm. 
-#![cfg_attr(not(feature = "std"), no_std)] - -mod backend; -mod tests; -pub mod precompiles; - -pub use crate::precompiles::{Precompile, Precompiles}; -pub use crate::backend::{Account, Log, Vicinity, Backend}; - -use sp_std::vec::Vec; -#[cfg(feature = "std")] -use codec::{Encode, Decode}; -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -use frame_support::{debug, ensure, decl_module, decl_storage, decl_event, decl_error}; -use frame_support::weights::{Weight, Pays}; -use frame_support::traits::{Currency, ExistenceRequirement, Get}; -use frame_support::dispatch::DispatchResultWithPostInfo; -use frame_system::RawOrigin; -use sp_core::{U256, H256, H160, Hasher}; -use sp_runtime::{AccountId32, traits::{UniqueSaturatedInto, SaturatedConversion, BadOrigin}}; -use sha3::{Digest, Keccak256}; -pub use evm::{ExitReason, ExitSucceed, ExitError, ExitRevert, ExitFatal}; -use evm::Config; -use evm::executor::StackExecutor; -use evm::backend::ApplyBackend; - -/// Type alias for currency balance. -pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; - -/// Trait that outputs the current transaction gas price. -pub trait FeeCalculator { - /// Return the minimal required gas price. - fn min_gas_price() -> U256; -} - -impl FeeCalculator for () { - fn min_gas_price() -> U256 { U256::zero() } -} - -pub trait EnsureAddressOrigin { - /// Success return type. - type Success; - - /// Perform the origin check. - fn ensure_address_origin( - address: &H160, - origin: OuterOrigin, - ) -> Result { - Self::try_address_origin(address, origin).map_err(|_| BadOrigin) - } - - /// Try with origin. - fn try_address_origin( - address: &H160, - origin: OuterOrigin, - ) -> Result; -} - -/// Ensure that the EVM address is the same as the Substrate address. This only works if the account -/// ID is `H160`. -pub struct EnsureAddressSame; - -impl EnsureAddressOrigin for EnsureAddressSame where - OuterOrigin: Into, OuterOrigin>> + From>, -{ - type Success = H160; - - fn try_address_origin( - address: &H160, - origin: OuterOrigin, - ) -> Result { - origin.into().and_then(|o| match o { - RawOrigin::Signed(who) if &who == address => Ok(who), - r => Err(OuterOrigin::from(r)) - }) - } -} - -/// Ensure that the origin is root. -pub struct EnsureAddressRoot(sp_std::marker::PhantomData); - -impl EnsureAddressOrigin for EnsureAddressRoot where - OuterOrigin: Into, OuterOrigin>> + From>, -{ - type Success = (); - - fn try_address_origin( - _address: &H160, - origin: OuterOrigin, - ) -> Result<(), OuterOrigin> { - origin.into().and_then(|o| match o { - RawOrigin::Root => Ok(()), - r => Err(OuterOrigin::from(r)), - }) - } -} - -/// Ensure that the origin never happens. -pub struct EnsureAddressNever(sp_std::marker::PhantomData); - -impl EnsureAddressOrigin for EnsureAddressNever { - type Success = AccountId; - - fn try_address_origin( - _address: &H160, - origin: OuterOrigin, - ) -> Result { - Err(origin) - } -} - -/// Ensure that the address is truncated hash of the origin. Only works if the account id is -/// `AccountId32`. 
-pub struct EnsureAddressTruncated; - -impl EnsureAddressOrigin for EnsureAddressTruncated where - OuterOrigin: Into, OuterOrigin>> + From>, -{ - type Success = AccountId32; - - fn try_address_origin( - address: &H160, - origin: OuterOrigin, - ) -> Result { - origin.into().and_then(|o| match o { - RawOrigin::Signed(who) - if AsRef::<[u8; 32]>::as_ref(&who)[0..20] == address[0..20] => Ok(who), - r => Err(OuterOrigin::from(r)) - }) - } -} - -pub trait AddressMapping { - fn into_account_id(address: H160) -> A; -} - -/// Identity address mapping. -pub struct IdentityAddressMapping; - -impl AddressMapping for IdentityAddressMapping { - fn into_account_id(address: H160) -> H160 { address } -} - -/// Hashed address mapping. -pub struct HashedAddressMapping(sp_std::marker::PhantomData); - -impl> AddressMapping for HashedAddressMapping { - fn into_account_id(address: H160) -> AccountId32 { - let mut data = [0u8; 24]; - data[0..4].copy_from_slice(b"evm:"); - data[4..24].copy_from_slice(&address[..]); - let hash = H::hash(&data); - - AccountId32::from(Into::<[u8; 32]>::into(hash)) - } -} - -/// Substrate system chain ID. -pub struct SystemChainId; - -impl Get for SystemChainId { - fn get() -> u64 { - sp_io::misc::chain_id() - } -} - -static ISTANBUL_CONFIG: Config = Config::istanbul(); - -/// EVM module trait -pub trait Trait: frame_system::Trait + pallet_timestamp::Trait { - /// Calculator for current gas price. - type FeeCalculator: FeeCalculator; - - /// Allow the origin to call on behalf of given address. - type CallOrigin: EnsureAddressOrigin; - /// Allow the origin to withdraw on behalf of given address. - type WithdrawOrigin: EnsureAddressOrigin; - - /// Mapping from address to account id. - type AddressMapping: AddressMapping; - /// Currency type for withdraw and balance storage. - type Currency: Currency; - - /// The overarching event type. - type Event: From> + Into<::Event>; - /// Precompiles associated with this EVM engine. - type Precompiles: Precompiles; - /// Chain ID of EVM. - type ChainId: Get; - - /// EVM config used in the module. - fn config() -> &'static Config { - &ISTANBUL_CONFIG - } -} - -#[cfg(feature = "std")] -#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, Serialize, Deserialize)] -/// Account definition used for genesis block construction. -pub struct GenesisAccount { - /// Account nonce. - pub nonce: U256, - /// Account balance. - pub balance: U256, - /// Full account storage. - pub storage: std::collections::BTreeMap, - /// Account code. - pub code: Vec, -} - -decl_storage! { - trait Store for Module as EVM { - AccountCodes get(fn account_codes): map hasher(blake2_128_concat) H160 => Vec; - AccountStorages get(fn account_storages): - double_map hasher(blake2_128_concat) H160, hasher(blake2_128_concat) H256 => H256; - } - - add_extra_genesis { - config(accounts): std::collections::BTreeMap; - build(|config: &GenesisConfig| { - for (address, account) in &config.accounts { - Module::::mutate_account_basic(&address, Account { - balance: account.balance, - nonce: account.nonce, - }); - AccountCodes::insert(address, &account.code); - - for (index, value) in &account.storage { - AccountStorages::insert(address, index, value); - } - } - }); - } -} - -decl_event! { - /// EVM events - pub enum Event where - ::AccountId, - { - /// Ethereum events from contracts. - Log(Log), - /// A contract has been created at given \[address\]. - Created(H160), - /// A \[contract\] was attempted to be created, but the execution failed. 
- CreatedFailed(H160), - /// A \[contract\] has been executed successfully with states applied. - Executed(H160), - /// A \[contract\] has been executed with errors. States are reverted with only gas fees applied. - ExecutedFailed(H160), - /// A deposit has been made at a given address. \[sender, address, value\] - BalanceDeposit(AccountId, H160, U256), - /// A withdrawal has been made from a given address. \[sender, address, value\] - BalanceWithdraw(AccountId, H160, U256), - } -} - -decl_error! { - pub enum Error for Module { - /// Not enough balance to perform action - BalanceLow, - /// Calculating total fee overflowed - FeeOverflow, - /// Calculating total payment overflowed - PaymentOverflow, - /// Withdraw fee failed - WithdrawFailed, - /// Gas price is too low. - GasPriceTooLow, - /// Nonce is invalid - InvalidNonce, - } -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - - /// Withdraw balance from EVM into currency/balances module. - #[weight = 0] - fn withdraw(origin, address: H160, value: BalanceOf) { - let destination = T::WithdrawOrigin::ensure_address_origin(&address, origin)?; - let address_account_id = T::AddressMapping::into_account_id(address); - - T::Currency::transfer( - &address_account_id, - &destination, - value, - ExistenceRequirement::AllowDeath - )?; - } - - /// Issue an EVM call operation. This is similar to a message call transaction in Ethereum. - #[weight = (*gas_price).saturated_into::().saturating_mul(*gas_limit as Weight)] - fn call( - origin, - source: H160, - target: H160, - input: Vec, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - ) -> DispatchResultWithPostInfo { - T::CallOrigin::ensure_address_origin(&source, origin)?; - - match Self::execute_call( - source, - target, - input, - value, - gas_limit, - gas_price, - nonce, - true, - )? { - (ExitReason::Succeed(_), _, _, _) => { - Module::::deposit_event(Event::::Executed(target)); - }, - (_, _, _, _) => { - Module::::deposit_event(Event::::ExecutedFailed(target)); - }, - } - - Ok(Pays::No.into()) - } - - /// Issue an EVM create operation. This is similar to a contract creation transaction in - /// Ethereum. - #[weight = (*gas_price).saturated_into::().saturating_mul(*gas_limit as Weight)] - fn create( - origin, - source: H160, - init: Vec, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - ) -> DispatchResultWithPostInfo { - T::CallOrigin::ensure_address_origin(&source, origin)?; - - match Self::execute_create( - source, - init, - value, - gas_limit, - gas_price, - nonce, - true, - )? { - (ExitReason::Succeed(_), create_address, _, _) => { - Module::::deposit_event(Event::::Created(create_address)); - }, - (_, create_address, _, _) => { - Module::::deposit_event(Event::::CreatedFailed(create_address)); - }, - } - - Ok(Pays::No.into()) - } - - /// Issue an EVM create2 operation. - #[weight = (*gas_price).saturated_into::().saturating_mul(*gas_limit as Weight)] - fn create2( - origin, - source: H160, - init: Vec, - salt: H256, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - ) -> DispatchResultWithPostInfo { - T::CallOrigin::ensure_address_origin(&source, origin)?; - - match Self::execute_create2( - source, - init, - salt, - value, - gas_limit, - gas_price, - nonce, - true, - )? 
{ - (ExitReason::Succeed(_), create_address, _, _) => { - Module::::deposit_event(Event::::Created(create_address)); - }, - (_, create_address, _, _) => { - Module::::deposit_event(Event::::CreatedFailed(create_address)); - }, - } - - Ok(Pays::No.into()) - } - } -} - -impl Module { - fn remove_account(address: &H160) { - AccountCodes::remove(address); - AccountStorages::remove_prefix(address); - } - - fn mutate_account_basic(address: &H160, new: Account) { - let account_id = T::AddressMapping::into_account_id(*address); - let current = Self::account_basic(address); - - if current.nonce < new.nonce { - // ASSUME: in one single EVM transaction, the nonce will not increase more than - // `u128::max_value()`. - for _ in 0..(new.nonce - current.nonce).low_u128() { - frame_system::Module::::inc_account_nonce(&account_id); - } - } - - if current.balance > new.balance { - let diff = current.balance - new.balance; - T::Currency::slash(&account_id, diff.low_u128().unique_saturated_into()); - } else if current.balance < new.balance { - let diff = new.balance - current.balance; - T::Currency::deposit_creating(&account_id, diff.low_u128().unique_saturated_into()); - } - } - - /// Check whether an account is empty. - pub fn is_account_empty(address: &H160) -> bool { - let account = Self::account_basic(address); - let code_len = AccountCodes::decode_len(address).unwrap_or(0); - - account.nonce == U256::zero() && - account.balance == U256::zero() && - code_len == 0 - } - - /// Remove an account if its empty. - pub fn remove_account_if_empty(address: &H160) { - if Self::is_account_empty(address) { - Self::remove_account(address); - } - } - - /// Get the account basic in EVM format. - pub fn account_basic(address: &H160) -> Account { - let account_id = T::AddressMapping::into_account_id(*address); - - let nonce = frame_system::Module::::account_nonce(&account_id); - let balance = T::Currency::free_balance(&account_id); - - Account { - nonce: U256::from(UniqueSaturatedInto::::unique_saturated_into(nonce)), - balance: U256::from(UniqueSaturatedInto::::unique_saturated_into(balance)), - } - } - - /// Execute a create transaction on behalf of given sender. - pub fn execute_create( - source: H160, - init: Vec, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - apply_state: bool, - ) -> Result<(ExitReason, H160, U256, Vec), Error> { - Self::execute_evm( - source, - value, - gas_limit, - gas_price, - nonce, - apply_state, - |executor| { - let address = executor.create_address( - evm::CreateScheme::Legacy { caller: source }, - ); - (executor.transact_create( - source, - value, - init, - gas_limit as usize, - ), address) - }, - ) - } - - /// Execute a create2 transaction on behalf of a given sender. - pub fn execute_create2( - source: H160, - init: Vec, - salt: H256, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - apply_state: bool, - ) -> Result<(ExitReason, H160, U256, Vec), Error> { - let code_hash = H256::from_slice(Keccak256::digest(&init).as_slice()); - Self::execute_evm( - source, - value, - gas_limit, - gas_price, - nonce, - apply_state, - |executor| { - let address = executor.create_address( - evm::CreateScheme::Create2 { caller: source, code_hash, salt }, - ); - (executor.transact_create2( - source, - value, - init, - salt, - gas_limit as usize, - ), address) - }, - ) - } - - /// Execute a call transaction on behalf of a given sender. 
- pub fn execute_call( - source: H160, - target: H160, - input: Vec, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - apply_state: bool, - ) -> Result<(ExitReason, Vec, U256, Vec), Error> { - Self::execute_evm( - source, - value, - gas_limit, - gas_price, - nonce, - apply_state, - |executor| executor.transact_call( - source, - target, - value, - input, - gas_limit as usize, - ), - ) - } - - /// Execute an EVM operation. - fn execute_evm( - source: H160, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - apply_state: bool, - f: F, - ) -> Result<(ExitReason, R, U256, Vec), Error> where - F: FnOnce(&mut StackExecutor>) -> (ExitReason, R), - { - - // Gas price check is skipped when performing a gas estimation. - if apply_state { - ensure!(gas_price >= T::FeeCalculator::min_gas_price(), Error::::GasPriceTooLow); - } - - let vicinity = Vicinity { - gas_price, - origin: source, - }; - - let mut backend = Backend::::new(&vicinity); - let mut executor = StackExecutor::new_with_precompile( - &backend, - gas_limit as usize, - T::config(), - T::Precompiles::execute, - ); - - let total_fee = gas_price.checked_mul(U256::from(gas_limit)) - .ok_or(Error::::FeeOverflow)?; - let total_payment = value.checked_add(total_fee).ok_or(Error::::PaymentOverflow)?; - let source_account = Self::account_basic(&source); - ensure!(source_account.balance >= total_payment, Error::::BalanceLow); - executor.withdraw(source, total_fee).map_err(|_| Error::::WithdrawFailed)?; - - if let Some(nonce) = nonce { - ensure!(source_account.nonce == nonce, Error::::InvalidNonce); - } - - let (retv, reason) = f(&mut executor); - - let used_gas = U256::from(executor.used_gas()); - let actual_fee = executor.fee(gas_price); - debug::debug!( - target: "evm", - "Execution {:?} [source: {:?}, value: {}, gas_limit: {}, used_gas: {}, actual_fee: {}]", - retv, - source, - value, - gas_limit, - used_gas, - actual_fee - ); - executor.deposit(source, total_fee.saturating_sub(actual_fee)); - - let (values, logs) = executor.deconstruct(); - let logs_data = logs.into_iter().map(|x| x ).collect::>(); - let logs_result = logs_data.clone().into_iter().map(|it| { - Log { - address: it.address, - topics: it.topics, - data: it.data - } - }).collect(); - if apply_state { - backend.apply(values, logs_data, true); - } - - Ok((retv, reason, used_gas, logs_result)) - } -} diff --git a/frame/evm/src/precompiles.rs b/frame/evm/src/precompiles.rs deleted file mode 100644 index 440d9bf1c68c2175b6f9124bc4d1f9a1187f96ab..0000000000000000000000000000000000000000 --- a/frame/evm/src/precompiles.rs +++ /dev/null @@ -1,167 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Builtin precompiles. 
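The removed `execute_evm` above withdraws the full `gas_limit * gas_price` from the source account before running the executor and deposits back whatever was not consumed once `executor.used_gas()` is known; the sender must also be able to cover any transferred `value`. A worked arithmetic sketch of that flow, with made-up numbers and plain `u128` standing in for `U256`:

fn main() {
    // Hypothetical transaction parameters.
    let gas_price: u128 = 1_000_000_000; // 1 gwei
    let gas_limit: u128 = 100_000;
    let value: u128 = 5_000;

    // Worst-case fee, withdrawn before execution (FeeOverflow on overflow).
    let total_fee = gas_price.checked_mul(gas_limit).expect("FeeOverflow");
    // Balance the sender must hold up front (PaymentOverflow on overflow).
    let total_payment = value.checked_add(total_fee).expect("PaymentOverflow");

    // Suppose the executor reports 21_000 gas actually used.
    let used_gas: u128 = 21_000;
    let actual_fee = used_gas * gas_price; // executor.fee(gas_price)

    // The unused part is deposited back to the sender after execution.
    let refund = total_fee - actual_fee;
    assert_eq!(refund, 79_000 * gas_price);
    println!("required balance: {}, refunded: {}", total_payment, refund);
}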
- -use sp_std::{cmp::min, vec::Vec}; -use sp_core::H160; -use evm::{ExitError, ExitSucceed}; -use ripemd160::Digest; -use impl_trait_for_tuples::impl_for_tuples; - -/// Custom precompiles to be used by EVM engine. -pub trait Precompiles { - /// Try to execute the code address as precompile. If the code address is not - /// a precompile or the precompile is not yet available, return `None`. - /// Otherwise, calculate the amount of gas needed with given `input` and - /// `target_gas`. Return `Some(Ok(status, output, gas_used))` if the execution - /// is successful. Otherwise return `Some(Err(_))`. - fn execute( - address: H160, - input: &[u8], - target_gas: Option, - ) -> Option, usize), ExitError>>; -} - -/// One single precompile used by EVM engine. -pub trait Precompile { - /// Try to execute the precompile. Calculate the amount of gas needed with given `input` and - /// `target_gas`. Return `Ok(status, output, gas_used)` if the execution is - /// successful. Otherwise return `Err(_)`. - fn execute( - input: &[u8], - target_gas: Option, - ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError>; -} - -#[impl_for_tuples(16)] -#[tuple_types_no_default_trait_bound] -impl Precompiles for Tuple { - for_tuples!( where #( Tuple: Precompile )* ); - - fn execute( - address: H160, - input: &[u8], - target_gas: Option, - ) -> Option, usize), ExitError>> { - let mut index = 0; - - for_tuples!( #( - index += 1; - if address == H160::from_low_u64_be(index) { - return Some(Tuple::execute(input, target_gas)) - } - )* ); - - None - } -} - -/// Linear gas cost -fn ensure_linear_cost( - target_gas: Option, - len: usize, - base: usize, - word: usize -) -> Result { - let cost = base.checked_add( - word.checked_mul(len.saturating_add(31) / 32).ok_or(ExitError::OutOfGas)? - ).ok_or(ExitError::OutOfGas)?; - - if let Some(target_gas) = target_gas { - if cost > target_gas { - return Err(ExitError::OutOfGas) - } - } - - Ok(cost) -} - -/// The identity precompile. -pub struct Identity; - -impl Precompile for Identity { - fn execute( - input: &[u8], - target_gas: Option, - ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { - let cost = ensure_linear_cost(target_gas, input.len(), 15, 3)?; - - Ok((ExitSucceed::Returned, input.to_vec(), cost)) - } -} - -/// The ecrecover precompile. -pub struct ECRecover; - -impl Precompile for ECRecover { - fn execute( - i: &[u8], - target_gas: Option, - ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { - let cost = ensure_linear_cost(target_gas, i.len(), 3000, 0)?; - - let mut input = [0u8; 128]; - input[..min(i.len(), 128)].copy_from_slice(&i[..min(i.len(), 128)]); - - let mut msg = [0u8; 32]; - let mut sig = [0u8; 65]; - - msg[0..32].copy_from_slice(&input[0..32]); - sig[0..32].copy_from_slice(&input[64..96]); - sig[32..64].copy_from_slice(&input[96..128]); - sig[64] = input[63]; - - let pubkey = sp_io::crypto::secp256k1_ecdsa_recover(&sig, &msg) - .map_err(|_| ExitError::Other("Public key recover failed"))?; - let mut address = sp_io::hashing::keccak_256(&pubkey); - address[0..12].copy_from_slice(&[0u8; 12]); - - Ok((ExitSucceed::Returned, address.to_vec(), cost)) - } -} - -/// The ripemd precompile. 
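The removed `ensure_linear_cost` above prices a precompile call as a base cost plus a per-32-byte-word cost, `base + word * ceil(len / 32)`, and fails with `OutOfGas` when that exceeds `target_gas`. A standalone sketch of the arithmetic (overflow and gas-limit checks omitted), using the Identity parameters (base 15, word 3) and the flat ECRecover cost (base 3000, word 0) from the removed code:

/// Same linear formula as `ensure_linear_cost`, minus the checks.
fn linear_cost(len: usize, base: usize, word: usize) -> usize {
    base + word * ((len + 31) / 32)
}

fn main() {
    // Identity over a 100-byte input: 100 bytes round up to 4 words.
    assert_eq!(linear_cost(100, 15, 3), 27);
    // ECRecover charges a flat 3000: the per-word component is zero.
    assert_eq!(linear_cost(128, 3000, 0), 3000);
    println!("linear precompile costs check out");
}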
-pub struct Ripemd160; - -impl Precompile for Ripemd160 { - fn execute( - input: &[u8], - target_gas: Option, - ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { - let cost = ensure_linear_cost(target_gas, input.len(), 600, 120)?; - - let mut ret = [0u8; 32]; - ret[12..32].copy_from_slice(&ripemd160::Ripemd160::digest(input)); - Ok((ExitSucceed::Returned, ret.to_vec(), cost)) - } -} - -/// The sha256 precompile. -pub struct Sha256; - -impl Precompile for Sha256 { - fn execute( - input: &[u8], - target_gas: Option, - ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { - let cost = ensure_linear_cost(target_gas, input.len(), 60, 12)?; - - let ret = sp_io::hashing::sha2_256(input); - Ok((ExitSucceed::Returned, ret.to_vec(), cost)) - } -} diff --git a/frame/evm/src/tests.rs b/frame/evm/src/tests.rs deleted file mode 100644 index d05fdca1407e5678fbae2d93d0914185b256436e..0000000000000000000000000000000000000000 --- a/frame/evm/src/tests.rs +++ /dev/null @@ -1,189 +0,0 @@ -#![cfg(test)] - -use super::*; - -use std::{str::FromStr, collections::BTreeMap}; -use frame_support::{ - assert_ok, impl_outer_origin, parameter_types, impl_outer_dispatch, -}; -use sp_core::{Blake2Hasher, H256}; -use sp_runtime::{ - Perbill, - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, -}; - -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} - -impl_outer_dispatch! { - pub enum OuterCall for Test where origin: Origin { - self::EVM, - } -} - -#[derive(Clone, Eq, PartialEq)] -pub struct Test; -parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} -impl frame_system::Trait for Test { - type BaseCallFilter = (); - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = OuterCall; - type Hashing = BlakeTwo256; - type AccountId = AccountId32; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type PalletInfo = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); -} - -parameter_types! { - pub const ExistentialDeposit: u64 = 1; -} -impl pallet_balances::Trait for Test { - type MaxLocks = (); - type Balance = u64; - type DustRemoval = (); - type Event = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); -} - -parameter_types! { - pub const MinimumPeriod: u64 = 1000; -} -impl pallet_timestamp::Trait for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = MinimumPeriod; - type WeightInfo = (); -} - -/// Fixed gas price of `0`. -pub struct FixedGasPrice; -impl FeeCalculator for FixedGasPrice { - fn min_gas_price() -> U256 { - // Gas price is always one token per gas. 
- 0.into() - } -} - -impl Trait for Test { - type FeeCalculator = FixedGasPrice; - - type CallOrigin = EnsureAddressRoot; - type WithdrawOrigin = EnsureAddressNever; - - type AddressMapping = HashedAddressMapping; - type Currency = Balances; - - type Event = Event; - type Precompiles = (); - type ChainId = SystemChainId; -} - -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type EVM = Module; - -pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - - let mut accounts = BTreeMap::new(); - accounts.insert( - H160::from_str("1000000000000000000000000000000000000001").unwrap(), - GenesisAccount { - nonce: U256::from(1), - balance: U256::from(1000000), - storage: Default::default(), - code: vec![ - 0x00, // STOP - ], - } - ); - accounts.insert( - H160::from_str("1000000000000000000000000000000000000002").unwrap(), - GenesisAccount { - nonce: U256::from(1), - balance: U256::from(1000000), - storage: Default::default(), - code: vec![ - 0xff, // INVALID - ], - } - ); - - pallet_balances::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); - GenesisConfig { accounts }.assimilate_storage::(&mut t).unwrap(); - t.into() -} - -#[test] -fn fail_call_return_ok() { - new_test_ext().execute_with(|| { - assert_ok!(EVM::call( - Origin::root(), - H160::default(), - H160::from_str("1000000000000000000000000000000000000001").unwrap(), - Vec::new(), - U256::default(), - 1000000, - U256::default(), - None, - )); - - assert_ok!(EVM::call( - Origin::root(), - H160::default(), - H160::from_str("1000000000000000000000000000000000000002").unwrap(), - Vec::new(), - U256::default(), - 1000000, - U256::default(), - None, - )); - }); -} - -#[test] -fn mutate_account_works() { - new_test_ext().execute_with(|| { - EVM::mutate_account_basic( - &H160::from_str("1000000000000000000000000000000000000001").unwrap(), - Account { - nonce: U256::from(10), - balance: U256::from(1000), - }, - ); - - assert_eq!(EVM::account_basic( - &H160::from_str("1000000000000000000000000000000000000001").unwrap() - ), Account { - nonce: U256::from(10), - balance: U256::from(1000), - }); - }); -} diff --git a/frame/example-offchain-worker/README.md b/frame/example-offchain-worker/README.md index a2a95a8cfb97e24943251d01e593168bfe543542..5299027f39250d515d1b72e07b020f02c287c8ef 100644 --- a/frame/example-offchain-worker/README.md +++ b/frame/example-offchain-worker/README.md @@ -24,4 +24,4 @@ Additional logic in OCW is put in place to prevent spamming the network with bot and unsigned transactions, and custom `UnsignedValidator` makes sure that there is only one unsigned transaction floating in the network. -License: Unlicense \ No newline at end of file +License: Unlicense diff --git a/frame/example-offchain-worker/src/lib.rs b/frame/example-offchain-worker/src/lib.rs index b64e3f8dd83f5dbad24a1e856f6effbaf511ec3d..29e545ae2d97b14bda7401d65f54d3370f20a061 100644 --- a/frame/example-offchain-worker/src/lib.rs +++ b/frame/example-offchain-worker/src/lib.rs @@ -24,7 +24,7 @@ //! Run `cargo doc --package pallet-example-offchain-worker --open` to view this module's //! documentation. //! -//! - [`pallet_example_offchain_worker::Trait`](./trait.Trait.html) +//! - [`pallet_example_offchain_worker::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! 
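The remainder of this patch is dominated by the mechanical `Trait` → `Config` rename: the pallet configuration trait, its `frame_system` supertrait, and every `<T as frame_system::Trait>::...` path change spelling. A hedged sketch of the pattern on a stand-alone example; the `Module` struct and `current_block` accessor exist only to show the path change and are not taken from this diff:

```rust
// After this patch the configuration trait is `Config`, bounded on
// `frame_system::Config`, and fully qualified paths follow suit.
pub trait Config: frame_system::Config {
    // was: type Event: ... + Into<<Self as frame_system::Trait>::Event>;
    type Event: Into<<Self as frame_system::Config>::Event>;
}

// Inherent and trait impls change their bound the same way:
// was: impl<T: Trait> Module<T> { ... }
pub struct Module<T: Config>(sp_std::marker::PhantomData<T>);

impl<T: Config> Module<T> {
    /// Example accessor using the renamed associated-type path.
    pub fn current_block() -> <T as frame_system::Config>::BlockNumber {
        frame_system::Module::<T>::block_number()
    }
}
```

The storage, event, error and dispatch macros take the new bound in the same positions, e.g. `decl_module! { pub struct Module<T: Config> for enum Call where origin: T::Origin { ... } }` as in the hunks above.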
@@ -103,12 +103,12 @@ pub mod crypto { } /// This pallet's configuration trait -pub trait Trait: CreateSignedTransaction> { +pub trait Config: CreateSignedTransaction> { /// The identifier type for an offchain worker. type AuthorityId: AppCrypto; /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The overarching dispatch call type. type Call: From>; @@ -149,7 +149,7 @@ impl SignedPayload for PricePayload as ExampleOffchainWorker { + trait Store for Module as ExampleOffchainWorker { /// A vector of recently submitted prices. /// /// This is used to calculate average price, should have bounded size. @@ -165,7 +165,7 @@ decl_storage! { decl_event!( /// Events generated by the module. - pub enum Event where AccountId = ::AccountId { + pub enum Event where AccountId = ::AccountId { /// Event generated when new price is accepted to contribute to the average. /// \[price, who\] NewPrice(u32, AccountId), @@ -174,7 +174,7 @@ decl_event!( decl_module! { /// A public part of the pallet. - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { fn deposit_event() = default; /// Submit new price to the list. @@ -310,7 +310,7 @@ enum TransactionType { /// /// This greatly helps with error messages, as the ones inside the macro /// can sometimes be hard to debug. -impl Module { +impl Module { /// Chooses which transaction type to send. /// /// This function serves mostly to showcase `StorageValue` helper @@ -679,7 +679,7 @@ impl Module { } #[allow(deprecated)] // ValidateUnsigned -impl frame_support::unsigned::ValidateUnsigned for Module { +impl frame_support::unsigned::ValidateUnsigned for Module { type Call = Call; /// Validate unsigned call to this module. diff --git a/frame/example-offchain-worker/src/tests.rs b/frame/example-offchain-worker/src/tests.rs index 204b366964f47b46053ea63edf6673bc8d1f1dcd..196d4cac4adcc6f4e4579feed1ee3e4d76d20a46 100644 --- a/frame/example-offchain-worker/src/tests.rs +++ b/frame/example-offchain-worker/src/tests.rs @@ -20,7 +20,6 @@ use std::sync::Arc; use codec::{Encode, Decode}; use frame_support::{ assert_ok, impl_outer_origin, parameter_types, - weights::Weight, }; use sp_core::{ H256, @@ -33,7 +32,7 @@ use sp_keystore::{ testing::KeyStore, }; use sp_runtime::{ - Perbill, RuntimeAppPublic, + RuntimeAppPublic, testing::{Header, TestXt}, traits::{ BlakeTwo256, IdentityLookup, Extrinsic as ExtrinsicT, @@ -52,12 +51,14 @@ impl_outer_origin! { pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = (); type Index = u64; @@ -69,13 +70,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = (); @@ -118,7 +112,7 @@ parameter_types! { pub const UnsignedPriority: u64 = 1 << 20; } -impl Trait for Test { +impl Config for Test { type Event = (); type AuthorityId = crypto::TestAuthId; type Call = Call; @@ -282,7 +276,7 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { let signature_valid = ::Public, - ::BlockNumber + ::BlockNumber > as SignedPayload>::verify::(&price_payload, signature); assert!(signature_valid); @@ -335,7 +329,7 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { let signature_valid = ::Public, - ::BlockNumber + ::BlockNumber > as SignedPayload>::verify::(&price_payload, signature); assert!(signature_valid); diff --git a/frame/example-parallel/src/lib.rs b/frame/example-parallel/src/lib.rs index 4b7ce72b4d40e74960058699c3f20de28298981e..b616e3d49278aa58502c289e52ce392a13af41f9 100644 --- a/frame/example-parallel/src/lib.rs +++ b/frame/example-parallel/src/lib.rs @@ -34,15 +34,15 @@ use sp_std::vec::Vec; #[cfg(test)] mod tests; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From + Into<::Event>; + type Event: From + Into<::Event>; /// The overarching dispatch call type. type Call: From>; } decl_storage! { - trait Store for Module as ExampleOffchainWorker { + trait Store for Module as ExampleOffchainWorker { /// A vector of current participants /// /// To enlist someone to participate, signed payload should be @@ -87,7 +87,7 @@ impl EnlistedParticipant { decl_module! { /// A public part of the pallet. - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { fn deposit_event() = default; /// Get the new event running. diff --git a/frame/example-parallel/src/tests.rs b/frame/example-parallel/src/tests.rs index 1da8c60388266d60a8adaaf16cf5c3aafe489b01..24e846c3de42c6e0ae6fb427b759d5956c7d5c96 100644 --- a/frame/example-parallel/src/tests.rs +++ b/frame/example-parallel/src/tests.rs @@ -18,7 +18,7 @@ use crate::*; use codec::{Encode, Decode}; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; +use frame_support::{impl_outer_origin, parameter_types}; use sp_core::H256; use sp_runtime::{ Perbill, @@ -34,12 +34,10 @@ impl_outer_origin! { pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); type Origin = Origin; type Call = (); @@ -53,13 +51,9 @@ impl frame_system::Trait for Test { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; + type BlockWeights = (); + type BlockLength = (); type Version = (); type AccountData = (); type OnNewAccount = (); @@ -73,7 +67,7 @@ parameter_types! { pub const UnsignedPriority: u64 = 1 << 20; } -impl Trait for Test { +impl Config for Test { type Event = (); type Call = Call; } diff --git a/frame/example/README.md b/frame/example/README.md index f1435a297b09e314c72cb66e54931d3aabf379da..46a0d076a969a803a63ce56de79d4f233bb03da9 100644 --- a/frame/example/README.md +++ b/frame/example/README.md @@ -195,7 +195,7 @@ Copy and paste this template from frame/example/src/lib.rs into file \```rust use ; -pub trait Trait: ::Trait { } +pub trait Config: ::Config { } \``` \### Simple Code Snippet @@ -235,4 +235,4 @@ pub trait Trait: ::Trait { } // that the implementation is based on.
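Alongside the rename, every mock runtime in this patch drops the individual block-limit constants in favour of a single `frame_system::limits::BlockWeights` value (see the offchain-worker and parallel-example test hunks above). A sketch of the before/after shape; `Test` is the usual mock-runtime placeholder:

```rust
// Mock-runtime migration applied throughout this patch.
use frame_support::parameter_types;

parameter_types! {
    pub const BlockHashCount: u64 = 250;
    // was:
    //   pub const MaximumBlockWeight: Weight = 1024;
    //   pub const MaximumBlockLength: u32 = 2 * 1024;
    //   pub const AvailableBlockRatio: Perbill = Perbill::one();
    pub BlockWeights: frame_system::limits::BlockWeights =
        frame_system::limits::BlockWeights::simple_max(1024);
}

// In `impl frame_system::Config for Test`, the old weight/length
// associated types (MaximumBlockWeight, MaximumBlockLength,
// AvailableBlockRatio, BlockExecutionWeight, ExtrinsicBaseWeight,
// MaximumExtrinsicWeight) are gone; the hunks above replace them with:
//
//     type BlockWeights = ();   // or `BlockWeights` where the limits matter
//     type BlockLength = ();
//     type DbWeight = ();
```

Tests that only need some large limit use `simple_max`, while the executive tests further down build finer-grained limits with `BlockWeights::builder()`.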

-License: Unlicense \ No newline at end of file +License: Unlicense diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index 103bcfe6968684a494feae4a57c1bf827deef525..3ddb2fd4c1d3cdf36c58a167c1f08048fcabf29b 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -63,7 +63,7 @@ //! // Include the following links that shows what trait needs to be implemented to use the pallet //! // and the supported dispatchables that are documented in the Call enum. //! -//! - \[`::Trait`](./trait.Trait.html) +//! - \[`::Config`](./trait.Config.html) //! - \[`Call`](./enum.Call.html) //! - \[`Module`](./struct.Module.html) //! @@ -131,7 +131,7 @@ //! //! //! // Reference documentation of aspects such as `storageItems` and `dispatchable` functions should only be -//! // included in the https://docs.rs Rustdocs for Substrate and not repeated in the README file. +//! // included in the Rustdocs for Substrate and not repeated in the README file. //! //! \### Dispatchable Functions //! @@ -212,7 +212,7 @@ //! \```rust //! use ; //! -//! pub trait Trait: ::Trait { } +//! pub trait Config: ::Config { } //! \``` //! //! \### Simple Code Snippet @@ -224,8 +224,8 @@ //! // Show a usage example in an actual runtime //! //! // See: -//! // - Substrate TCR https://github.com/parity-samples/substrate-tcr -//! // - Substrate Kitties https://shawntabrizi.github.io/substrate-collectables-workshop/#/ +//! // - Substrate TCR +//! // - Substrate Kitties //! //! \## Genesis Config //! @@ -286,9 +286,9 @@ use sp_runtime::{ // - The final weight of each dispatch is calculated as the argument of the call multiplied by the // parameter given to the `WeightForSetDummy`'s constructor. // - assigns a dispatch class `operational` if the argument of the call is more than 1000. -struct WeightForSetDummy(BalanceOf); +struct WeightForSetDummy(BalanceOf); -impl WeighData<(&BalanceOf,)> for WeightForSetDummy +impl WeighData<(&BalanceOf,)> for WeightForSetDummy { fn weigh_data(&self, target: (&BalanceOf,)) -> Weight { let multiplier = self.0; @@ -296,7 +296,7 @@ impl WeighData<(&BalanceOf,)> for WeightForSetDumm } } -impl ClassifyDispatch<(&BalanceOf,)> for WeightForSetDummy { +impl ClassifyDispatch<(&BalanceOf,)> for WeightForSetDummy { fn classify_dispatch(&self, target: (&BalanceOf,)) -> DispatchClass { if *target.0 > >::from(1000u32) { DispatchClass::Operational @@ -306,23 +306,23 @@ impl ClassifyDispatch<(&BalanceOf,)> for WeightFor } } -impl PaysFee<(&BalanceOf,)> for WeightForSetDummy { +impl PaysFee<(&BalanceOf,)> for WeightForSetDummy { fn pays_fee(&self, _target: (&BalanceOf,)) -> Pays { Pays::Yes } } /// A type alias for the balance type from this pallet's point of view. -type BalanceOf = ::Balance; +type BalanceOf = ::Balance; /// Our pallet's configuration trait. All our types and constants go in here. If the /// pallet is dependent on specific other pallets, then their configuration traits /// should be added to our implied traits list. /// -/// `frame_system::Trait` should always be included in our implied traits. -pub trait Trait: pallet_balances::Trait { +/// `frame_system::Config` should always be included in our implied traits. +pub trait Config: pallet_balances::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; } decl_storage! { @@ -333,7 +333,7 @@ decl_storage! { // It is important to update your storage name so that your pallet's // storage items are isolated from other pallets. 
// ---------------------------------vvvvvvv - trait Store for Module as Example { + trait Store for Module as Example { // Any storage declarations of the form: // `pub? Name get(fn getter_name)? [config()|config(myname)] [build(|_| {...})] : (= )?;` // where `` is either: @@ -371,7 +371,7 @@ decl_event!( /// Events are a simple means of reporting specific conditions and /// circumstances that have happened that users, Dapps and/or chain explorers would find /// interesting and otherwise difficult to detect. - pub enum Event where B = ::Balance { + pub enum Event where B = ::Balance { // Just a normal `enum`, here's a dummy event to ensure it compiles. /// Dummy event, just here so there's a generic type that's used. Dummy(B), @@ -414,7 +414,7 @@ decl_event!( // `ensure_root` and `ensure_none`. decl_module! { // Simple declaration of the `Module` type. Lets the macro know what its working on. - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// Deposit one of this pallet's events by using the default implementation. /// It is also possible to provide a custom implementation. /// For non-generic events, the generic parameter just needs to be dropped, so that it @@ -548,7 +548,7 @@ decl_module! { // - Public interface. These are functions that are `pub` and generally fall into inspector // functions that do not write to storage and operation functions that do. // - Private functions. These are your usual private utilities unavailable to other pallets. -impl Module { +impl Module { // Add public immutables and private mutables. #[allow(dead_code)] fn accumulate_foo(origin: T::Origin, increase_by: T::Balance) -> DispatchResult { @@ -571,7 +571,7 @@ impl Module { // decodable type that implements `SignedExtension`. See the trait definition for the full list of // bounds. As a convention, you can follow this approach to create an extension for your pallet: // - If the extension does not carry any data, then use a tuple struct with just a `marker` -// (needed for the compiler to accept `T: Trait`) will suffice. +// (needed for the compiler to accept `T: Config`) will suffice. // - Otherwise, create a tuple struct which contains the external data. Of course, for the entire // struct to be decodable, each individual item also needs to be decodable. // @@ -602,21 +602,21 @@ impl Module { /// Additionally, it drops any transaction with an encoded length higher than 200 bytes. No /// particular reason why, just to demonstrate the power of signed extensions. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct WatchDummy(PhantomData); +pub struct WatchDummy(PhantomData); -impl sp_std::fmt::Debug for WatchDummy { +impl sp_std::fmt::Debug for WatchDummy { fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "WatchDummy") } } -impl SignedExtension for WatchDummy +impl SignedExtension for WatchDummy where - ::Call: IsSubType>, + ::Call: IsSubType>, { const IDENTIFIER: &'static str = "WatchDummy"; type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::Call; type AdditionalSigned = (); type Pre = (); @@ -718,7 +718,6 @@ mod tests { // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; @@ -740,12 +739,14 @@ mod tests { pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -757,13 +758,6 @@ mod tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -774,7 +768,7 @@ mod tests { parameter_types! { pub const ExistentialDeposit: u64 = 1; } - impl pallet_balances::Trait for Test { + impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type DustRemoval = (); @@ -783,7 +777,7 @@ mod tests { type AccountStore = System; type WeightInfo = (); } - impl Trait for Test { + impl Config for Test { type Event = (); } type System = frame_system::Module; diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index ccb5c2d2628713620e52cf7a8e36eed151abc929..59e9cae198375c5c37052b47b154d62f8516f662 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -145,7 +145,7 @@ pub type OriginOf = as Dispatchable>::Origin; /// Main entry point for certain runtime actions as e.g. `execute_block`. /// /// Generic parameters: -/// - `System`: Something that implements `frame_system::Trait` +/// - `System`: Something that implements `frame_system::Config` /// - `Block`: The block type of the runtime /// - `Context`: The context that is used when checking an extrinsic. /// - `UnsignedValidator`: The unsigned transaction validator of the runtime. 
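For orientation, the generics documented above are what a runtime supplies when it defines its `Executive` type. A sketch of the conventional instantiation from a runtime's `lib.rs`; `Runtime`, `Block`, `AllModules` and the alias itself are runtime-side names assumed here, not taken from this diff:

```rust
// Hedged sketch: typical Executive wiring in a construct_runtime!-based
// runtime of this era. Only `frame_executive::Executive` and
// `frame_system::ChainContext` are library items; the rest are assumed
// to be defined by the runtime.
pub type Executive = frame_executive::Executive<
    Runtime,                             // System: implements frame_system::Config
    Block,                               // the runtime's block type
    frame_system::ChainContext<Runtime>, // Context used when checking extrinsics
    Runtime,                             // UnsignedValidator
    AllModules,                          // pallets whose on_initialize/on_finalize run each block
>;
```

This is also where the weight bookkeeping changed below lands: `initialize_block` now charges `BlockWeights::get().base_block` instead of the old `BlockExecutionWeight` constant.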
@@ -158,7 +158,7 @@ pub struct Executive, Context: Default, UnsignedValidator, @@ -185,7 +185,7 @@ where } impl< - System: frame_system::Trait, + System: frame_system::Config, Block: traits::Block, Context: Default, UnsignedValidator, @@ -251,8 +251,12 @@ where weight = weight.saturating_add( as OnInitialize>::on_initialize(*block_number) ); - weight = weight.saturating_add(>::on_initialize(*block_number)) - .saturating_add(>::get()); + weight = weight.saturating_add( + >::on_initialize(*block_number) + ); + weight = weight.saturating_add( + >::get().base_block + ); >::register_extra_weight_unchecked(weight, DispatchClass::Mandatory); frame_system::Module::::note_finished_initialize(); @@ -482,7 +486,7 @@ mod tests { use super::*; use sp_core::H256; use sp_runtime::{ - generic::{Era, DigestItem}, Perbill, DispatchError, testing::{Digest, Header, Block}, + generic::{Era, DigestItem}, DispatchError, testing::{Digest, Header, Block}, traits::{Header as HeaderT, BlakeTwo256, IdentityLookup}, transaction_validity::{ InvalidTransaction, ValidTransaction, TransactionValidityError, UnknownTransaction @@ -493,7 +497,9 @@ mod tests { weights::{Weight, RuntimeDbWeight, IdentityFee, WeightToFeePolynomial}, traits::{Currency, LockIdentifier, LockableCurrency, WithdrawReasons}, }; - use frame_system::{Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo}; + use frame_system::{ + Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo, + }; use pallet_transaction_payment::CurrencyAdapter; use pallet_balances::Call as BalancesCall; use hex_literal::hex; @@ -505,10 +511,10 @@ mod tests { UnknownTransaction, TransactionSource, TransactionValidity }; - pub trait Trait: frame_system::Trait {} + pub trait Config: frame_system::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 100] fn some_function(origin) { // NOTE: does not make any different. @@ -555,7 +561,7 @@ mod tests { } } - impl sp_runtime::traits::ValidateUnsigned for Module { + impl sp_runtime::traits::ValidateUnsigned for Module { type Call = Call; fn validate_unsigned( @@ -584,18 +590,22 @@ mod tests { parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const BlockExecutionWeight: Weight = 10; - pub const ExtrinsicBaseWeight: Weight = 5; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::builder() + .base_block(10) + .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = 5) + .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = 1024.into()) + .build_or_panic(); pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 10, write: 100, }; } - impl frame_system::Trait for Runtime { + impl frame_system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type Call = Call; @@ -607,13 +617,6 @@ mod tests { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = DbWeight; - type BlockExecutionWeight = BlockExecutionWeight; - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = RuntimeVersion; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; @@ -626,7 +629,7 @@ mod tests { parameter_types! { pub const ExistentialDeposit: Balance = 1; } - impl pallet_balances::Trait for Runtime { + impl pallet_balances::Config for Runtime { type Balance = Balance; type Event = Event; type DustRemoval = (); @@ -639,13 +642,13 @@ mod tests { parameter_types! { pub const TransactionByteFee: Balance = 0; } - impl pallet_transaction_payment::Trait for Runtime { + impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } - impl custom::Trait for Runtime {} + impl custom::Config for Runtime {} pub struct RuntimeVersion; impl frame_support::traits::Get for RuntimeVersion { @@ -668,8 +671,8 @@ mod tests { type TestXt = sp_runtime::testing::TestXt; type TestBlock = Block; type TestUncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< - ::AccountId, - ::Call, + ::AccountId, + ::Call, (), SignedExtra, >; @@ -715,9 +718,10 @@ mod tests { balances: vec![(1, 211)], }.assimilate_storage(&mut t).unwrap(); let xt = TestXt::new(Call::Balances(BalancesCall::transfer(2, 69)), sign_extra(1, 0, 0)); - let weight = xt.get_dispatch_info().weight + ::ExtrinsicBaseWeight::get(); + let weight = xt.get_dispatch_info().weight + + ::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic; let fee: Balance - = ::WeightToFee::calc(&weight); + = ::WeightToFee::calc(&weight); let mut t = sp_io::TestExternalities::new(t); t.execute_with(|| { Executive::initialize_block(&Header::new( @@ -749,7 +753,7 @@ mod tests { header: Header { parent_hash: [69u8; 32].into(), number: 1, - state_root: hex!("465a1569d309039bdf84b0479d28064ea29e6584584dc7d788904bb14489c6f6").into(), + state_root: hex!("ba1a82a264b8007e0c04c9ea35e541593daad08b6e2bf7c0a6780a67d1c55018").into(), extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), digest: Digest { logs: vec![], }, }, @@ -817,9 +821,11 @@ mod tests { let xt = 
TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 0, 0)); let encoded = xt.encode(); let encoded_len = encoded.len() as Weight; - // on_initialize weight + block execution weight - let base_block_weight = 175 + ::BlockExecutionWeight::get(); - let limit = AvailableBlockRatio::get() * MaximumBlockWeight::get() - base_block_weight; + // on_initialize weight + base block execution weight + let block_weights = ::BlockWeights::get(); + let base_block_weight = 175 + block_weights.base_block; + let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() + - base_block_weight; let num_to_exhaust_block = limit / (encoded_len + 5); t.execute_with(|| { Executive::initialize_block(&Header::new( @@ -861,7 +867,7 @@ mod tests { let mut t = new_test_ext(1); t.execute_with(|| { // Block execution weight + on_initialize weight from custom module - let base_block_weight = 175 + ::BlockExecutionWeight::get(); + let base_block_weight = 175 + ::BlockWeights::get().base_block; Executive::initialize_block(&Header::new( 1, @@ -879,7 +885,8 @@ mod tests { assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); // default weight for `TestXt` == encoded length. - let extrinsic_weight = len as Weight + ::ExtrinsicBaseWeight::get(); + let extrinsic_weight = len as Weight + ::BlockWeights + ::get().get(DispatchClass::Normal).base_extrinsic; assert_eq!( >::block_weight().total(), base_block_weight + 3 * extrinsic_weight, @@ -945,10 +952,13 @@ mod tests { Call::System(SystemCall::remark(vec![1u8])), sign_extra(1, 0, 0), ); - let weight = xt.get_dispatch_info().weight - + ::ExtrinsicBaseWeight::get(); + let weight = xt.get_dispatch_info().weight + + ::BlockWeights + ::get() + .get(DispatchClass::Normal) + .base_extrinsic; let fee: Balance = - ::WeightToFee::calc(&weight); + ::WeightToFee::calc(&weight); Executive::initialize_block(&Header::new( 1, H256::default(), @@ -1106,7 +1116,7 @@ mod tests { let runtime_upgrade_weight = ::on_runtime_upgrade(); let frame_system_on_initialize_weight = frame_system::Module::::on_initialize(block_number); let on_initialize_weight = >::on_initialize(block_number); - let base_block_weight = ::BlockExecutionWeight::get(); + let base_block_weight = ::BlockWeights::get().base_block; // Weights are recorded correctly assert_eq!( diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index e9662a726c40e308e1e9ff0e4eb14b4d43992b9f..72f1434b24a99183fe5f9f3084c098c257d6d730 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -54,13 +54,13 @@ use sp_staking::{ SessionIndex, }; -use super::{Call, Module, Trait}; +use super::{Call, Module, Config}; /// A trait with utility methods for handling equivocation reports in GRANDPA. /// The offence type is generic, and the trait provides , reporting an offence /// triggered by a valid equivocation report, and also for creating and /// submitting equivocation report extrinsics (useful only in offchain context). -pub trait HandleEquivocation { +pub trait HandleEquivocation { /// The offence type used for reporting offences on valid equivocation reports. 
type Offence: GrandpaOffence; @@ -86,7 +86,7 @@ pub trait HandleEquivocation { fn block_author() -> Option; } -impl HandleEquivocation for () { +impl HandleEquivocation for () { type Offence = GrandpaEquivocationOffence; fn report_offence( @@ -136,7 +136,7 @@ where // We use the authorship pallet to fetch the current block author and use // `offchain::SendTransactionTypes` for unsigned extrinsic creation and // submission. - T: Trait + pallet_authorship::Trait + frame_system::offchain::SendTransactionTypes>, + T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, // A system for reporting offences after valid equivocation reports are // processed. R: ReportOffence, @@ -187,7 +187,7 @@ pub struct GrandpaTimeSlot { /// A `ValidateUnsigned` implementation that restricts calls to `report_equivocation_unsigned` /// to local calls (i.e. extrinsics generated on this node) or that already in a block. This /// guarantees that only block authors can include unsigned equivocation reports. -impl frame_support::unsigned::ValidateUnsigned for Module { +impl frame_support::unsigned::ValidateUnsigned for Module { type Call = Call; fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { if let Call::report_equivocation_unsigned(equivocation_proof, _) = call { diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index fe836ac913cb600fbfced77a460b34674e912e06..15099672d0d2da91e7982e3dce6d77c5c4185ff9 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -67,9 +67,9 @@ pub use equivocation::{ HandleEquivocation, }; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The event type of this module. - type Event: From + Into<::Event>; + type Event: From + Into<::Event>; /// The function call. type Call: From>; @@ -188,7 +188,7 @@ decl_event! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Attempt to signal GRANDPA pause when the authority set isn't live /// (either paused or already pending pause). PauseFailed, @@ -209,7 +209,7 @@ decl_error! { } decl_storage! { - trait Store for Module as GrandpaFinality { + trait Store for Module as GrandpaFinality { /// State of the current authority set. State get(fn state): StoredState = StoredState::Live; @@ -241,7 +241,7 @@ decl_storage! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; @@ -372,7 +372,7 @@ decl_module! { } } -impl Module { +impl Module { /// Get the current set of authorities, along with their respective weights. 
pub fn grandpa_authorities() -> AuthorityList { storage::unhashed::get_or_default::(GRANDPA_AUTHORITIES_KEY).into() @@ -583,12 +583,12 @@ impl Module { } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Module { type Public = AuthorityId; } -impl pallet_session::OneSessionHandler for Module - where T: pallet_session::Trait +impl pallet_session::OneSessionHandler for Module + where T: pallet_session::Config { type Key = AuthorityId; diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index d3461eec12dc4453402feee6339aed111819b065..4a5de63e839bb82ca40767c04a8ea56ab7733a9a 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -19,7 +19,7 @@ #![cfg(test)] -use crate::{AuthorityId, AuthorityList, ConsensusLog, Module, Trait}; +use crate::{AuthorityId, AuthorityList, ConsensusLog, Module, Config}; use ::grandpa as finality_grandpa; use codec::Encode; use frame_support::{ @@ -74,13 +74,15 @@ pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -92,13 +94,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = TestEvent; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -122,7 +117,7 @@ parameter_types! { } /// Custom `SessionHandler` since we use `TestSessionKeys` as `Keys`. -impl pallet_session::Trait for Test { +impl pallet_session::Config for Test { type Event = TestEvent; type ValidatorId = u64; type ValidatorIdOf = pallet_staking::StashOf; @@ -135,7 +130,7 @@ impl pallet_session::Trait for Test { type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -144,7 +139,7 @@ parameter_types! { pub const UncleGenerations: u64 = 0; } -impl pallet_authorship::Trait for Test { +impl pallet_authorship::Config for Test { type FindAuthor = (); type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -155,7 +150,7 @@ parameter_types! { pub const ExistentialDeposit: u128 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u128; type DustRemoval = (); @@ -169,7 +164,7 @@ parameter_types! { pub const MinimumPeriod: u64 = 3; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; @@ -198,7 +193,7 @@ parameter_types! 
{ pub const StakingUnsignedPriority: u64 = u64::max_value() / 2; } -impl pallet_staking::Trait for Test { +impl pallet_staking::Config for Test { type RewardRemainder = (); type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type Event = TestEvent; @@ -224,17 +219,17 @@ impl pallet_staking::Trait for Test { } parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * BlockWeights::get().max_block; } -impl pallet_offences::Trait for Test { +impl pallet_offences::Config for Test { type Event = TestEvent; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; } -impl Trait for Test { +impl Config for Test { type Event = TestEvent; type Call = Call; diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 4916808fe000f3f9919cb9c2f6668f40d2039477..4963d7e6b6d460cd194ba1b046cdc70b55811679 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -850,7 +850,7 @@ fn report_equivocation_has_valid_weight() { // but there's a lower bound of 100 validators. assert!( (1..=100) - .map(::WeightInfo::report_equivocation) + .map(::WeightInfo::report_equivocation) .collect::>() .windows(2) .all(|w| w[0] == w[1]) @@ -860,7 +860,7 @@ fn report_equivocation_has_valid_weight() { // with every extra validator. assert!( (100..=1000) - .map(::WeightInfo::report_equivocation) + .map(::WeightInfo::report_equivocation) .collect::>() .windows(2) .all(|w| w[0] < w[1]) diff --git a/frame/identity/README.md b/frame/identity/README.md index 8927febec6bbdc5bf7ace28039c9fde6bf693afb..38e16d4dd490235d99b724fa0b48d6cae406df10 100644 --- a/frame/identity/README.md +++ b/frame/identity/README.md @@ -51,6 +51,6 @@ no state-bloat attack is viable. * `kill_identity` - Forcibly remove the associated identity; the deposit is lost. [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index d7876514452e247ec0cb5fc2fce3699d14194b47..0176986c8224c70c5e294a5f910b2d3687dba0e9 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -29,16 +29,16 @@ use crate::Module as Identity; const SEED: u32 = 0; -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::Event) { let events = frame_system::Module::::events(); - let system_event: ::Event = generic_event.into(); + let system_event: ::Event = generic_event.into(); // compare to the last event record let EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); } // Adds `r` registrars to the Identity Pallet. These registrars will have set fees and fields. -fn add_registrars(r: u32) -> Result<(), &'static str> { +fn add_registrars(r: u32) -> Result<(), &'static str> { for i in 0..r { let registrar: T::AccountId = account("registrar", i, SEED); let _ = T::Currency::make_free_balance_be(®istrar, BalanceOf::::max_value()); @@ -57,7 +57,7 @@ fn add_registrars(r: u32) -> Result<(), &'static str> { // Create `s` sub-accounts for the identity of `who` and return them. // Each will have 32 bytes of raw data added to it. 
-fn create_sub_accounts(who: &T::AccountId, s: u32) -> Result, &'static str> { +fn create_sub_accounts(who: &T::AccountId, s: u32) -> Result, &'static str> { let mut subs = Vec::new(); let who_origin = RawOrigin::Signed(who.clone()); let data = Data::Raw(vec![0; 32]); @@ -77,7 +77,7 @@ fn create_sub_accounts(who: &T::AccountId, s: u32) -> Result(who: &T::AccountId, s: u32) -> Result, &'static str> { +fn add_sub_accounts(who: &T::AccountId, s: u32) -> Result, &'static str> { let who_origin = RawOrigin::Signed(who.clone()); let subs = create_sub_accounts::(who, s)?; @@ -88,7 +88,7 @@ fn add_sub_accounts(who: &T::AccountId, s: u32) -> Result(num_fields: u32) -> IdentityInfo { +fn create_identity_info(num_fields: u32) -> IdentityInfo { let data = Data::Raw(vec![0; 32]); let info = IdentityInfo { @@ -121,7 +121,7 @@ benchmarks! { // Create their main identity with x additional fields let info = create_identity_info::(x); let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); Identity::::set_identity(caller_origin, info)?; }; } @@ -143,7 +143,7 @@ benchmarks! { // The target user let caller: T::AccountId = whitelisted_caller(); let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); - let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); + let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Add an initial identity @@ -200,7 +200,7 @@ benchmarks! { clear_identity { let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); let caller_lookup = ::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -237,7 +237,7 @@ benchmarks! { cancel_request { let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in ...; @@ -300,7 +300,7 @@ benchmarks! { provide_judgement { // The user let user: T::AccountId = account("user", r, SEED); - let user_origin = ::Origin::from(RawOrigin::Signed(user.clone())); + let user_origin = ::Origin::from(RawOrigin::Signed(user.clone())); let user_lookup = ::unlookup(user.clone()); let _ = T::Currency::make_free_balance_be(&user, BalanceOf::::max_value()); @@ -328,7 +328,7 @@ benchmarks! { let x in _ .. _ => {}; let target: T::AccountId = account("target", 0, SEED); - let target_origin: ::Origin = RawOrigin::Signed(target.clone()).into(); + let target_origin: ::Origin = RawOrigin::Signed(target.clone()).into(); let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 0ee6563a5611d4d341697ebafa04c15bc3bc1f7c..959107e527a2ac34098198eb9ec51f03f5050280 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -17,7 +17,7 @@ //! # Identity Module //! -//! - [`identity::Trait`](./trait.Trait.html) +//! - [`identity::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -68,7 +68,7 @@ //! 
* `kill_identity` - Forcibly remove the associated identity; the deposit is lost. //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! [`Config`]: ./trait.Config.html #![cfg_attr(not(feature = "std"), no_std)] @@ -91,12 +91,12 @@ use frame_support::{ use frame_system::ensure_signed; pub use weights::WeightInfo; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The currency trait. type Currency: ReservableCurrency; @@ -399,7 +399,7 @@ pub struct RegistrarInfo< } decl_storage! { - trait Store for Module as Identity { + trait Store for Module as Identity { /// Information that is pertinent to identify the entity behind an account. /// /// TWOX-NOTE: OK ― `AccountId` is a secure hash. @@ -428,7 +428,7 @@ decl_storage! { } decl_event!( - pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { + pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { /// A name was set or reset (which will remove all judgements). \[who\] IdentitySet(AccountId), /// A name was cleared, and the given balance returned. \[who, deposit\] @@ -456,7 +456,7 @@ decl_event!( decl_error! { /// Error for the identity module. - pub enum Error for Module { + pub enum Error for Module { /// Too many subs-accounts. TooManySubAccounts, /// Account isn't found. @@ -494,7 +494,7 @@ decl_error! { decl_module! { /// Identity module declaration. - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// The amount held on deposit for a registered identity. const BasicDeposit: BalanceOf = T::BasicDeposit::get(); @@ -1125,7 +1125,7 @@ decl_module! { } } -impl Module { +impl Module { /// Get the subs of an account. pub fn subs(who: &T::AccountId) -> Vec<(T::AccountId, Data)> { SubsOf::::get(who).1 @@ -1134,3 +1134,4 @@ impl Module { .collect() } } + diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs index 0637ac6aafc5f6cb9bbd6427339d46285c041891..7f3a95dcd124aed47db1905108a81496416b554c 100644 --- a/frame/identity/src/tests.rs +++ b/frame/identity/src/tests.rs @@ -21,13 +21,13 @@ use super::*; use sp_runtime::traits::BadOrigin; use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, + assert_ok, assert_noop, impl_outer_origin, parameter_types, ord_parameter_types, }; use sp_core::H256; use frame_system::{EnsureSignedBy, EnsureOneOf, EnsureRoot}; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, + testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; impl_outer_origin! { @@ -38,12 +38,13 @@ impl_outer_origin! { pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -55,13 +56,7 @@ impl frame_system::Trait for Test { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -72,7 +67,7 @@ impl frame_system::Trait for Test { parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type Balance = u64; type Event = (); type DustRemoval = (); @@ -103,7 +98,7 @@ type EnsureTwoOrRoot = EnsureOneOf< EnsureRoot, EnsureSignedBy >; -impl Trait for Test { +impl Config for Test { type Event = (); type Currency = Balances; type Slashed = (); diff --git a/frame/identity/src/weights.rs b/frame/identity/src/weights.rs index 44efbb31035efec8678e23af7d4f1d15ec1018a8..431a26cc0960d57ca2e3bab49f3cdaa6ac71813a 100644 --- a/frame/identity/src/weights.rs +++ b/frame/identity/src/weights.rs @@ -64,7 +64,7 @@ pub trait WeightInfo { /// Weights for pallet_identity using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn add_registrar(r: u32, ) -> Weight { (28_965_000 as Weight) .saturating_add((421_000 as Weight).saturating_mul(r as Weight)) diff --git a/frame/im-online/README.md b/frame/im-online/README.md index 9a65bb6a980861830b28987134528002f14eb202..a2ed5edc906a2497a2b2cb8c3fcd9994a6972c8b 100644 --- a/frame/im-online/README.md +++ b/frame/im-online/README.md @@ -30,10 +30,10 @@ use frame_support::{decl_module, dispatch}; use frame_system::ensure_signed; use pallet_im_online::{self as im_online}; -pub trait Trait: im_online::Trait {} +pub trait Config: im_online::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn is_online(origin, authority_index: u32) -> dispatch::DispatchResult { let _sender = ensure_signed(origin)?; @@ -48,4 +48,4 @@ decl_module! { This module depends on the [Session module](https://docs.rs/pallet-session/latest/pallet_session/). 
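The `weights.rs` hunk above shows the one change this patch makes to generated weight files: the blanket implementation for `SubstrateWeight` is now bounded on `frame_system::Config` instead of `frame_system::Trait`. A sketch of the resulting file shape; the `do_something` benchmark and its constants are placeholders, not real benchmark output:

```rust
// Illustrative shape of a generated weights file after this patch.
use frame_support::weights::Weight;
use sp_std::marker::PhantomData;

pub trait WeightInfo {
    fn do_something(r: u32) -> Weight;
}

/// Weights measured on the Substrate node and recommended hardware.
pub struct SubstrateWeight<T>(PhantomData<T>);

// was: impl<T: frame_system::Trait> WeightInfo for SubstrateWeight<T>
impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
    fn do_something(r: u32) -> Weight {
        (10_000_000 as Weight)
            // linear component in the benchmarked parameter `r`
            .saturating_add((500_000 as Weight).saturating_mul(r as Weight))
            .saturating_add(T::DbWeight::get().reads(1 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
}

// Fallback implementation used by tests and mocks.
impl WeightInfo for () {
    fn do_something(_r: u32) -> Weight {
        0
    }
}
```

The identity weights file above and the im-online weights file further down follow exactly this pattern.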
-License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/im-online/src/benchmarking.rs b/frame/im-online/src/benchmarking.rs index b92be023ce480013cdfa2345027a6b48cdd4cc46..452a9f26ed7d090978cef9bb4159d5004171453b 100644 --- a/frame/im-online/src/benchmarking.rs +++ b/frame/im-online/src/benchmarking.rs @@ -34,7 +34,7 @@ use crate::Module as ImOnline; const MAX_KEYS: u32 = 1000; const MAX_EXTERNAL_ADDRESSES: u32 = 100; -pub fn create_heartbeat(k: u32, e: u32) -> +pub fn create_heartbeat(k: u32, e: u32) -> Result<(crate::Heartbeat, ::Signature), &'static str> { let mut keys = Vec::new(); diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index 2d3693d12720704c2b27a3c19be376a511d18d48..09cb2afa22be4f7a6708023ef47726e633cbafe3 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -30,7 +30,7 @@ //! as the [NetworkState](../../client/offchain/struct.NetworkState.html). //! It is submitted as an Unsigned Transaction via off-chain workers. //! -//! - [`im_online::Trait`](./trait.Trait.html) +//! - [`im_online::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! @@ -47,10 +47,10 @@ //! use frame_system::ensure_signed; //! use pallet_im_online::{self as im_online}; //! -//! pub trait Trait: im_online::Trait {} +//! pub trait Config: im_online::Config {} //! //! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 0] //! pub fn is_online(origin, authority_index: u32) -> dispatch::DispatchResult { //! let _sender = ensure_signed(origin)?; @@ -227,12 +227,12 @@ pub struct Heartbeat pub validators_len: u32, } -pub trait Trait: SendTransactionTypes> + pallet_session::historical::Trait { +pub trait Config: SendTransactionTypes> + pallet_session::historical::Config { /// The identifier type for an authority. type AuthorityId: Member + Parameter + RuntimeAppPublic + Default + Ord; /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// An expected duration of the session. /// @@ -262,7 +262,7 @@ pub trait Trait: SendTransactionTypes> + pallet_session::historical:: decl_event!( pub enum Event where - ::AuthorityId, + ::AuthorityId, IdentificationTuple = IdentificationTuple, { /// A new heartbeat was received from `AuthorityId` \[authority_id\] @@ -275,7 +275,7 @@ decl_event!( ); decl_storage! { - trait Store for Module as ImOnline { + trait Store for Module as ImOnline { /// The block number after which it's ok to send heartbeats in current session. /// /// At the beginning of each session we set this to a value that should @@ -307,7 +307,7 @@ decl_storage! { decl_error! { /// Error for the im-online module. - pub enum Error for Module { + pub enum Error for Module { /// Non existent public key. InvalidKey, /// Duplicated heartbeat. @@ -316,7 +316,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; @@ -332,7 +332,7 @@ decl_module! { /// # // NOTE: the weight includes the cost of validate_unsigned as it is part of the cost to // import block with such an extrinsic. 
- #[weight = ::WeightInfo::validate_unsigned_and_then_heartbeat( + #[weight = ::WeightInfo::validate_unsigned_and_then_heartbeat( heartbeat.validators_len as u32, heartbeat.network_state.external_addresses.len() as u32, )] @@ -393,11 +393,11 @@ decl_module! { } } -type OffchainResult = Result::BlockNumber>>; +type OffchainResult = Result::BlockNumber>>; /// Keep track of number of authored blocks per authority, uncles are counted as /// well since they're a valid proof of being online. -impl pallet_authorship::EventHandler for Module { +impl pallet_authorship::EventHandler for Module { fn note_author(author: T::ValidatorId) { Self::note_authorship(author); } @@ -407,7 +407,7 @@ impl pallet_authorship::EventHandler Module { +impl Module { /// Returns `true` if a heartbeat has been received for the authority at /// `authority_index` in the authorities series or if the authority has /// authored at least one block, during the current session. Otherwise @@ -610,11 +610,11 @@ impl Module { } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Module { type Public = T::AuthorityId; } -impl pallet_session::OneSessionHandler for Module { +impl pallet_session::OneSessionHandler for Module { type Key = T::AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) @@ -677,7 +677,7 @@ impl pallet_session::OneSessionHandler for Module { /// Invalid transaction custom error. Returned when validators_len field in heartbeat is incorrect. const INVALID_VALIDATORS_LEN: u8 = 10; -impl frame_support::unsigned::ValidateUnsigned for Module { +impl frame_support::unsigned::ValidateUnsigned for Module { type Call = Call; fn validate_unsigned( diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index dae4bb3447e56423ac4a3f4c479f09fdbeab8e03..0a6dc1f79c07ae7db77c8a0d36a0729f82567b00 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -21,13 +21,13 @@ use std::cell::RefCell; -use crate::{Module, Trait}; +use crate::{Module, Config}; use sp_runtime::Perbill; use sp_staking::{SessionIndex, offence::{ReportOffence, OffenceError}}; use sp_runtime::testing::{Header, UintAuthorityId, TestXt}; use sp_runtime::traits::{IdentityLookup, BlakeTwo256, ConvertInto}; use sp_core::H256; -use frame_support::{impl_outer_origin, impl_outer_dispatch, parameter_types, weights::Weight}; +use frame_support::{impl_outer_origin, impl_outer_dispatch, parameter_types}; impl_outer_origin!{ pub enum Origin for Runtime {} @@ -104,13 +104,15 @@ pub struct Runtime; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Runtime { +impl frame_system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -122,13 +124,6 @@ impl frame_system::Trait for Runtime { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = (); @@ -146,7 +141,7 @@ parameter_types! { pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); } -impl pallet_session::Trait for Runtime { +impl pallet_session::Config for Runtime { type ShouldEndSession = pallet_session::PeriodicSessions; type SessionManager = pallet_session::historical::NoteHistoricalRoot; type SessionHandler = (ImOnline, ); @@ -159,7 +154,7 @@ impl pallet_session::Trait for Runtime { type WeightInfo = (); } -impl pallet_session::historical::Trait for Runtime { +impl pallet_session::historical::Config for Runtime { type FullIdentification = u64; type FullIdentificationOf = ConvertInto; } @@ -168,7 +163,7 @@ parameter_types! { pub const UncleGenerations: u32 = 5; } -impl pallet_authorship::Trait for Runtime { +impl pallet_authorship::Config for Runtime { type FindAuthor = (); type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -179,7 +174,7 @@ parameter_types! { pub const UnsignedPriority: u64 = 1 << 20; } -impl Trait for Runtime { +impl Config for Runtime { type AuthorityId = UintAuthorityId; type Event = (); type ReportUnresponsiveness = OffenceHandler; diff --git a/frame/im-online/src/weights.rs b/frame/im-online/src/weights.rs index f9df679bd2beade6e5f715fc8b28d2ce732d4f66..c0f11c69c4b2355ce9f7528cf17d87a41416693c 100644 --- a/frame/im-online/src/weights.rs +++ b/frame/im-online/src/weights.rs @@ -49,7 +49,7 @@ pub trait WeightInfo { /// Weights for pallet_im_online using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { (114_379_000 as Weight) .saturating_add((219_000 as Weight).saturating_mul(k as Weight)) diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index 6d467aa67344510b480971f51a8e70301a17c5fd..18eb5449848177a8760a8d969e269f23de801e60 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -37,10 +37,10 @@ use frame_support::traits::{Currency, ReservableCurrency, Get, BalanceStatus::Re use frame_system::{ensure_signed, ensure_root}; pub use weights::WeightInfo; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// The module's config trait. 
-pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// Type used for storing an account's index; implies the maximum number of accounts the system /// can hold. type AccountIndex: Parameter + Member + Codec + Default + AtLeast32Bit + Copy; @@ -52,14 +52,14 @@ pub trait Trait: frame_system::Trait { type Deposit: Get>; /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } decl_storage! { - trait Store for Module as Indices { + trait Store for Module as Indices { /// The lookup from index to account. pub Accounts build(|config: &GenesisConfig| config.indices.iter() @@ -75,8 +75,8 @@ decl_storage! { decl_event!( pub enum Event where - ::AccountId, - ::AccountIndex + ::AccountId, + ::AccountIndex { /// A account index was assigned. \[index, who\] IndexAssigned(AccountId, AccountIndex), @@ -88,7 +88,7 @@ decl_event!( ); decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// The index was not already assigned. NotAssigned, /// The index is assigned to another account. @@ -103,7 +103,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system = frame_system { + pub struct Module for enum Call where origin: T::Origin, system = frame_system { /// The deposit needed for reserving an index. const Deposit: BalanceOf = T::Deposit::get(); @@ -275,7 +275,7 @@ decl_module! { } } -impl Module { +impl Module { // PUBLIC IMMUTABLES /// Lookup an T::AccountIndex to get an Id, if there's one there. @@ -295,7 +295,7 @@ impl Module { } } -impl StaticLookup for Module { +impl StaticLookup for Module { type Source = MultiAddress; type Target = T::AccountId; diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index cfbd2e38c3d3ff60c8e74d7f24a016cee77ad526..63f0277548f928f2b11699dc772ddcf7924605f1 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -20,10 +20,9 @@ #![cfg(test)] use sp_runtime::testing::Header; -use sp_runtime::Perbill; use sp_core::H256; -use frame_support::{impl_outer_origin, impl_outer_event, parameter_types, weights::Weight}; -use crate::{self as indices, Module, Trait}; +use frame_support::{impl_outer_origin, impl_outer_event, parameter_types}; +use crate::{self as indices, Module, Config}; use frame_system as system; use pallet_balances as balances; @@ -44,13 +43,15 @@ pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = (); type Index = u64; @@ -62,13 +63,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = MetaEvent; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -81,7 +75,7 @@ parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type DustRemoval = (); @@ -95,7 +89,7 @@ parameter_types! { pub const Deposit: u64 = 1; } -impl Trait for Test { +impl Config for Test { type AccountIndex = u64; type Currency = Balances; type Deposit = Deposit; diff --git a/frame/indices/src/weights.rs b/frame/indices/src/weights.rs index 36d990cec52aa5d2dde9a34b5165a2fb657dc36f..96470625329f06915b7d7ab87bb9a27dfe36502f 100644 --- a/frame/indices/src/weights.rs +++ b/frame/indices/src/weights.rs @@ -53,7 +53,7 @@ pub trait WeightInfo { /// Weights for pallet_indices using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn claim() -> Weight { (53_799_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 06188c42b21bdc6a99617ac18f724060224a2fa7..cfdc38752b5e04452e495f042776e65038a572ec 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -30,9 +30,9 @@ use frame_support::{ }; use frame_system::ensure_signed; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// Required origin for adding a member (though can always be Root). type AddOrigin: EnsureOrigin; @@ -59,7 +59,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Membership { + trait Store for Module, I: Instance=DefaultInstance> as Membership { /// The current membership, stored as an ordered Vec. Members get(fn members): Vec; @@ -80,8 +80,8 @@ decl_storage! { decl_event!( pub enum Event where - ::AccountId, - >::Event, + ::AccountId, + >::Event, { /// The given member was added; see the transaction for who. MemberAdded, @@ -100,7 +100,7 @@ decl_event!( decl_error! { /// Error for the nicks module. - pub enum Error for Module, I: Instance> { + pub enum Error for Module, I: Instance> { /// Already a member. AlreadyMember, /// Not a member. @@ -109,7 +109,7 @@ decl_error! { } decl_module! 
{ - pub struct Module, I: Instance=DefaultInstance> + pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: T::Origin { @@ -253,7 +253,7 @@ decl_module! { } } -impl, I: Instance> Module { +impl, I: Instance> Module { fn rejig_prime(members: &[T::AccountId]) { if let Some(prime) = Prime::::get() { match members.binary_search(&prime) { @@ -264,7 +264,7 @@ impl, I: Instance> Module { } } -impl, I: Instance> Contains for Module { +impl, I: Instance> Contains for Module { fn sorted_members() -> Vec { Self::members() } @@ -279,11 +279,11 @@ mod tests { use super::*; use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, + assert_ok, assert_noop, impl_outer_origin, parameter_types, ord_parameter_types }; use sp_core::H256; - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, testing::Header}; + use sp_runtime::{traits::{BlakeTwo256, IdentityLookup, BadOrigin}, testing::Header}; use frame_system::EnsureSignedBy; impl_outer_origin! { @@ -294,14 +294,16 @@ mod tests { pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); pub static Members: Vec = vec![]; pub static Prime: Option = None; } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -313,13 +315,6 @@ mod tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = (); @@ -359,7 +354,7 @@ mod tests { } } - impl Trait for Test { + impl Config for Test { type Event = (); type AddOrigin = EnsureSignedBy; type RemoveOrigin = EnsureSignedBy; diff --git a/frame/evm/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml similarity index 57% rename from frame/evm/Cargo.toml rename to frame/merkle-mountain-range/Cargo.toml index a228dfb566be28011a8a9bdf84aa150ba07de781..b46f42cacf6568333ae3b189ec600c574004749e 100644 --- a/frame/evm/Cargo.toml +++ b/frame/merkle-mountain-range/Cargo.toml @@ -1,51 +1,44 @@ [package] -name = "pallet-evm" +name = "pallet-mmr" version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" -description = "FRAME EVM contracts pallet" -readme = "README.md" +description = "FRAME Merkle Mountain Range pallet." 
[package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "2.0.0", default-features = false, path = "../support" } frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } +mmr-lib = { package = "ckb-merkle-mountain-range", default-features = false, version = "0.3.1" } +serde = { version = "1.0.101", optional = true } sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -primitive-types = { version = "0.7.0", default-features = false, features = ["rlp", "byteorder"] } -rlp = { version = "0.4", default-features = false } -evm = { version = "0.17", default-features = false } -sha3 = { version = "0.8", default-features = false } -impl-trait-for-tuples = "0.1" -ripemd160 = { version = "0.9", default-features = false } + +[dev-dependencies] +env_logger = "0.5" +hex-literal = "0.3" [features] default = ["std"] std = [ - "serde", "codec/std", - "sp-core/std", - "sp-runtime/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", - "pallet-balances/std", + "mmr-lib/std", + "serde", + "sp-core/std", "sp-io/std", + "sp-runtime/std", "sp-std/std", - "sha3/std", - "rlp/std", - "primitive-types/std", - "evm/std", - "pallet-timestamp/std", - "ripemd160/std", ] +runtime-benchmarks = ["frame-benchmarking"] diff --git a/frame/merkle-mountain-range/src/benchmarking.rs b/frame/merkle-mountain-range/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..af634e18821fc6e7edb7b71bc8a0298021e8fa99 --- /dev/null +++ b/frame/merkle-mountain-range/src/benchmarking.rs @@ -0,0 +1,56 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for the MMR pallet. + +#![cfg_attr(not(feature = "std"), no_std)] + +use crate::*; +use frame_support::traits::OnInitialize; +use frame_benchmarking::benchmarks; +use sp_std::prelude::*; + +benchmarks! { + _ { } + + on_initialize { + let x in 1 .. 
1_000; + + let leaves = x as u64; + }: { + for b in 0..leaves { + Module::::on_initialize((b as u32).into()); + } + } verify { + assert_eq!(crate::NumberOfLeaves::::get(), leaves); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::*; + use crate::tests::new_test_ext; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_on_initialize::()); + }) + } +} diff --git a/frame/merkle-mountain-range/src/default_weights.rs b/frame/merkle-mountain-range/src/default_weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..0b31698545accd152cada004a0d5fe6d5ae734bb --- /dev/null +++ b/frame/merkle-mountain-range/src/default_weights.rs @@ -0,0 +1,42 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Default weights for the MMR Pallet +//! This file was not auto-generated. + +use frame_support::weights::{ + Weight, constants::{WEIGHT_PER_NANOS, RocksDbWeight as DbWeight}, +}; + +impl crate::WeightInfo for () { + fn on_initialize(peaks: u64) -> Weight { + // Reading the parent hash. + let leaf_weight = DbWeight::get().reads(1); + // Blake2 hash cost. + let hash_weight = 2 * WEIGHT_PER_NANOS; + // No-op hook. + let hook_weight = 0; + + leaf_weight + .saturating_add(hash_weight) + .saturating_add(hook_weight) + .saturating_add(DbWeight::get().reads_writes( + 2 + peaks, + 2 + peaks, + )) + } +} diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..81833a20538618cda4de281fe53f403ece9c51e7 --- /dev/null +++ b/frame/merkle-mountain-range/src/lib.rs @@ -0,0 +1,231 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Merkle Mountain Range +//! +//! ## Overview +//! +//! Details on Merkle Mountain Ranges (MMRs) can be found here: +//! +//! +//! The MMR pallet constructs a MMR from leaf data obtained on every block from +//! `LeafDataProvider`. MMR nodes are stored both in: +//! - on-chain storage - hashes only; not full leaf content) +//! - off-chain storage - via Indexing API we push full leaf content (and all internal nodes as +//! 
well) to the Off-chain DB, so that the data is available for Off-chain workers. +//! Hashing used for MMR is configurable independently from the rest of the runtime (i.e. not using +//! `frame_system::Hashing`) so something compatible with external chains can be used (like +//! Keccak256 for Ethereum compatibility). +//! +//! Depending on the usage context (off-chain vs on-chain) the pallet is able to: +//! - verify MMR leaf proofs (on-chain) +//! - generate leaf proofs (off-chain) +//! +//! See [primitives::Compact] documentation for how you can optimize proof size for leaves that are +//! composed from multiple elements. +//! +//! ## What for? +//! +//! The primary use case for this pallet is to generate MMR root hashes that can later be used by +//! the BEEFY protocol (see ). +//! MMR root hashes along with BEEFY will make it possible to build Super Light Clients (SLC) of +//! Substrate-based chains. The SLC will be able to follow finality and can be shown proofs of +//! details of what happened on the source chain. +//! In that case the chain which contains the pallet generates the Root Hashes and Proofs, which +//! are then presented to another chain acting as a light client which can verify them. +//! +//! A secondary use case is to archive historical data but still be able to retrieve it on demand +//! if needed. For instance, if parent block hashes are stored in the MMR, it's possible at any point +//! in time to provide an MMR proof about some past block hash, while this data can be safely pruned +//! from on-chain storage. +//! +//! NOTE: This pallet is experimental and not proven to work in production. +//! +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::Encode; +use frame_support::{ + decl_module, decl_storage, + weights::Weight, +}; +use sp_runtime::traits; + +mod default_weights; +mod mmr; +#[cfg(any(feature = "runtime-benchmarks", test))] +mod benchmarking; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +pub mod primitives; + +pub trait WeightInfo { + fn on_initialize(peaks: u64) -> Weight; +} + +/// This pallet's configuration trait. +pub trait Config: frame_system::Config { + /// Prefix for elements stored in the Off-chain DB via Indexing API. + /// + /// Each node of the MMR is inserted both on-chain and off-chain via Indexing API. + /// The former does not store full leaf content, just its compact version (hash), + /// and some of the inner mmr nodes might be pruned from on-chain storage. + /// The latter will contain all the entries in their full form. + /// + /// Each node is stored in the Off-chain DB under a key derived from the [`Self::INDEXING_PREFIX`] and + /// its in-tree index (MMR position). + const INDEXING_PREFIX: &'static [u8]; + + /// A hasher type for MMR. + /// + /// To construct trie nodes that result in merging (bagging) two peaks, depending on the node + /// kind we take either: + /// - The node (hash) itself if it's an inner node. + /// - The hash of SCALE-encoding of the leaf data if it's a leaf node. + /// + /// Then we create a tuple of these two hashes, SCALE-encode it (concatenate) and + /// hash it, to obtain a new MMR inner node - the new peak. + type Hashing: traits::Hash>::Hash>; + + /// The hashing output type. + /// + /// This type is actually going to be stored in the MMR. + /// Required to be provided again, to satisfy trait bounds for storage items.
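The remaining associated types of the trait follow below. Put together, wiring the pallet into a runtime looks roughly like this sketch, which mirrors the test mock further down; `Runtime` is a placeholder name and the restored generic bounds are assumptions:

impl pallet_mmr::Config for Runtime {
    const INDEXING_PREFIX: &'static [u8] = b"mmr";
    // Chosen independently from frame_system::Hashing, e.g. Keccak256 for
    // compatibility with Ethereum-side verifiers.
    type Hashing = sp_runtime::traits::Keccak256;
    type Hash = sp_core::H256;
    // frame_system::Module provides the parent block hash as leaf data,
    // so every block appends its parent's hash to the MMR.
    type LeafData = frame_system::Module<Runtime>;
    type OnNewRoot = ();
    type WeightInfo = ();
}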
+ type Hash: traits::Member + traits::MaybeSerializeDeserialize + sp_std::fmt::Debug + + sp_std::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + Copy + Default + codec::Codec + + codec::EncodeLike; + + /// Data stored in the leaf nodes. + /// + /// The [LeafData](primitives::LeafDataProvider) is responsible for returning the entire leaf + /// data that will be inserted to the MMR. + /// [LeafDataProvider](primitives::LeafDataProvider)s can be composed into tuples to put + /// multiple elements into the tree. In such a case it might be worth using [primitives::Compact] + /// to make MMR proof for one element of the tuple leaner. + type LeafData: primitives::LeafDataProvider; + + /// A hook to act on the new MMR root. + /// + /// For some applications it might be beneficial to make the MMR root available externally + /// apart from having it in the storage. For instance you might output it in the header digest + /// (see [frame_system::Module::deposit_log]) to make it available for Light Clients. + /// Hook complexity should be `O(1)`. + type OnNewRoot: primitives::OnNewRoot<>::Hash>; + + /// Weights for this pallet. + type WeightInfo: WeightInfo; +} + +decl_storage! { + trait Store for Module, I: Instance = DefaultInstance> as MerkleMountainRange { + /// Latest MMR Root hash. + pub RootHash get(fn mmr_root_hash): >::Hash; + + /// Current size of the MMR (number of leaves). + pub NumberOfLeaves get(fn mmr_leaves): u64; + + /// Hashes of the nodes in the MMR. + /// + /// Note this collection only contains MMR peaks, the inner nodes (and leaves) + /// are pruned and only stored in the Offchain DB. + pub Nodes get(fn mmr_peak): map hasher(identity) u64 => Option<>::Hash>; + } +} + +decl_module! { + /// A public part of the pallet. + pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { + fn on_initialize(n: T::BlockNumber) -> Weight { + use primitives::LeafDataProvider; + let leaves = Self::mmr_leaves(); + let peaks_before = mmr::utils::NodesUtils::new(leaves).number_of_peaks(); + let data = T::LeafData::leaf_data(); + // append new leaf to MMR + let mut mmr: ModuleMmr = mmr::Mmr::new(leaves); + mmr.push(data).expect("MMR push never fails."); + + // update the size + let (leaves, root) = mmr.finalize().expect("MMR finalize never fails."); + >::on_new_root(&root); + + ::put(leaves); + >::put(root); + + let peaks_after = mmr::utils::NodesUtils::new(leaves).number_of_peaks(); + T::WeightInfo::on_initialize(peaks_before.max(peaks_after)) + } + } +} + +/// A MMR specific to the pallet. +type ModuleMmr = mmr::Mmr>; + +/// Leaf data. +type LeafOf = <>::LeafData as primitives::LeafDataProvider>::LeafData; + +/// Hashing used for the pallet. +pub(crate) type HashingOf = >::Hashing; + +impl, I: Instance> Module { + fn offchain_key(pos: u64) -> sp_std::prelude::Vec { + (T::INDEXING_PREFIX, pos).encode() + } + + /// Generate a MMR proof for the given `leaf_index`. + /// + /// Note this method can only be used from an off-chain context + /// (Offchain Worker or Runtime API call), since it requires + /// all the leaves to be present. + /// It may return an error or panic if used incorrectly. + pub fn generate_proof(leaf_index: u64) -> Result< + (LeafOf, primitives::Proof<>::Hash>), + mmr::Error, + > { + let mmr: ModuleMmr = mmr::Mmr::new(Self::mmr_leaves()); + mmr.generate_proof(leaf_index) + } + + /// Verify MMR proof for given `leaf`. + /// + /// This method is safe to use within the runtime code. 
+ /// It will return `Ok(())` if the proof is valid + /// and an `Err(..)` if MMR is inconsistent (some leaves are missing) + /// or the proof is invalid. + pub fn verify_leaf( + leaf: LeafOf, + proof: primitives::Proof<>::Hash>, + ) -> Result<(), mmr::Error> { + if proof.leaf_count > Self::mmr_leaves() + || proof.leaf_count == 0 + || proof.items.len() as u32 > mmr::utils::NodesUtils::new(proof.leaf_count).depth() + { + return Err(mmr::Error::Verify.log_debug( + "The proof has incorrect number of leaves or proof items." + )); + } + + let mmr: ModuleMmr = mmr::Mmr::new(proof.leaf_count); + let is_valid = mmr.verify_leaf_proof(leaf, proof)?; + if is_valid { + Ok(()) + } else { + Err(mmr::Error::Verify.log_debug("The proof is incorrect.")) + } + } +} diff --git a/frame/merkle-mountain-range/src/mmr/mmr.rs b/frame/merkle-mountain-range/src/mmr/mmr.rs new file mode 100644 index 0000000000000000000000000000000000000000..ee27163ae435d8513ce5520825536b56d05672bc --- /dev/null +++ b/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -0,0 +1,186 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + Config, HashingOf, Instance, + mmr::{ + Node, NodeOf, Hasher, + storage::{Storage, OffchainStorage, RuntimeStorage}, + utils::NodesUtils, + }, + primitives, +}; +use frame_support::{debug, RuntimeDebug}; +use sp_std::fmt; +#[cfg(not(feature = "std"))] +use sp_std::{vec, prelude::Vec}; + +/// A wrapper around a MMR library to expose limited functionality. +/// +/// Available functions depend on the storage kind ([Runtime](crate::mmr::storage::RuntimeStorage) +/// vs [Off-chain](crate::mmr::storage::OffchainStorage)). +pub struct Mmr where + T: Config, + I: Instance, + L: primitives::FullLeaf, + Storage: mmr_lib::MMRStore>, +{ + mmr: mmr_lib::MMR< + NodeOf, + Hasher, L>, + Storage + >, + leaves: u64, +} + +impl Mmr where + T: Config, + I: Instance, + L: primitives::FullLeaf, + Storage: mmr_lib::MMRStore>, +{ + /// Create a pointer to an existing MMR with given number of leaves. + pub fn new(leaves: u64) -> Self { + let size = NodesUtils::new(leaves).size(); + Self { + mmr: mmr_lib::MMR::new(size, Default::default()), + leaves, + } + } + + /// Verify proof of a single leaf. + pub fn verify_leaf_proof( + &self, + leaf: L, + proof: primitives::Proof<>::Hash>, + ) -> Result { + let p = mmr_lib::MerkleProof::< + NodeOf, + Hasher, L>, + >::new( + self.mmr.mmr_size(), + proof.items.into_iter().map(Node::Hash).collect(), + ); + let position = mmr_lib::leaf_index_to_pos(proof.leaf_index); + let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?; + p.verify( + root, + vec![(position, Node::Data(leaf))], + ).map_err(|e| Error::Verify.log_debug(e)) + } + + /// Return the internal size of the MMR (number of nodes). + #[cfg(test)] + pub fn size(&self) -> u64 { + self.mmr.mmr_size() + } +} + +/// Runtime specific MMR functions. 
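The pallet-level `generate_proof` and `verify_leaf` above are the intended entry points for other code. A hypothetical caller of the on-chain verification path might look like this (`fetch_leaf_and_proof` and `Runtime` are assumed placeholders, not part of this PR):

fn verify_historical_leaf() -> Result<(), &'static str> {
    // The (leaf, proof) pair is whatever `generate_proof` returned off-chain,
    // e.g. received from a light client or another chain.
    let (leaf, proof) = fetch_leaf_and_proof();
    pallet_mmr::Module::<Runtime>::verify_leaf(leaf, proof)
        .map_err(|_| "MMR proof rejected")
}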
+impl Mmr where + T: Config, + I: Instance, + L: primitives::FullLeaf, +{ + + /// Push another item to the MMR. + /// + /// Returns element position (index) in the MMR. + pub fn push(&mut self, leaf: L) -> Option { + let position = self.mmr.push(Node::Data(leaf)) + .map_err(|e| Error::Push.log_error(e)) + .ok()?; + + self.leaves += 1; + + Some(position) + } + + /// Commit the changes to underlying storage, return current number of leaves and + /// calculate the new MMR's root hash. + pub fn finalize(self) -> Result<(u64, >::Hash), Error> { + let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?; + self.mmr.commit().map_err(|e| Error::Commit.log_error(e))?; + Ok((self.leaves, root.hash())) + } +} + +/// Off-chain specific MMR functions. +impl Mmr where + T: Config, + I: Instance, + L: primitives::FullLeaf, +{ + /// Generate a proof for given leaf index. + /// + /// Proof generation requires all the nodes (or their hashes) to be available in the storage. + /// (i.e. you can't run the function in the pruned storage). + pub fn generate_proof(&self, leaf_index: u64) -> Result< + (L, primitives::Proof<>::Hash>), + Error + > { + let position = mmr_lib::leaf_index_to_pos(leaf_index); + let store = >::default(); + let leaf = match mmr_lib::MMRStore::get_elem(&store, position) { + Ok(Some(Node::Data(leaf))) => leaf, + e => return Err(Error::LeafNotFound.log_debug(e)), + }; + let leaf_count = self.leaves; + self.mmr.gen_proof(vec![position]) + .map_err(|e| Error::GenerateProof.log_error(e)) + .map(|p| primitives::Proof { + leaf_index, + leaf_count, + items: p.proof_items().iter().map(|x| x.hash()).collect(), + }) + .map(|p| (leaf, p)) + } +} + +/// Merkle Mountain Range operation error. +#[derive(RuntimeDebug)] +#[cfg_attr(test, derive(PartialEq, Eq))] +pub enum Error { + /// Error while pushing new node. + Push, + /// Error getting the new root. + GetRoot, + /// Error commiting changes. + Commit, + /// Error during proof generation. + GenerateProof, + /// Proof verification error. + Verify, + /// Leaf not found in the storage. + LeafNotFound, +} + +impl Error { + /// Consume given error `e` with `self` and generate a native log entry with error details. + pub(crate) fn log_error(self, e: impl fmt::Debug) -> Self { + debug::native::error!("[{:?}] MMR error: {:?}", self, e); + self + } + + /// Consume given error `e` with `self` and generate a native log entry with error details. + pub(crate) fn log_debug(self, e: impl fmt::Debug) -> Self { + debug::native::debug!("[{:?}] MMR error: {:?}", self, e); + self + } + +} + diff --git a/frame/merkle-mountain-range/src/mmr/mod.rs b/frame/merkle-mountain-range/src/mmr/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..7fd8f5ae1bf0ea92245516aec9290207656d4839 --- /dev/null +++ b/frame/merkle-mountain-range/src/mmr/mod.rs @@ -0,0 +1,45 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod storage; +pub mod utils; +mod mmr; + +use crate::primitives::FullLeaf; +use sp_runtime::traits; + +pub use self::mmr::{Mmr, Error}; + +/// Node type for runtime `T`. +pub type NodeOf = Node<>::Hashing, L>; + +/// A node stored in the MMR. +pub type Node = crate::primitives::DataOrHash; + +/// Default Merging & Hashing behavior for MMR. +pub struct Hasher(sp_std::marker::PhantomData<(H, L)>); + +impl mmr_lib::Merge for Hasher { + type Item = Node; + + fn merge(left: &Self::Item, right: &Self::Item) -> Self::Item { + let mut concat = left.hash().as_ref().to_vec(); + concat.extend_from_slice(right.hash().as_ref()); + + Node::Hash(::hash(&concat)) + } +} diff --git a/frame/merkle-mountain-range/src/mmr/storage.rs b/frame/merkle-mountain-range/src/mmr/storage.rs new file mode 100644 index 0000000000000000000000000000000000000000..a1aa57087a2545d43495a020e046cf6d9a52ad17 --- /dev/null +++ b/frame/merkle-mountain-range/src/mmr/storage.rs @@ -0,0 +1,112 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A MMR storage implementations. + +use codec::Encode; +use crate::mmr::{NodeOf, Node}; +use crate::{NumberOfLeaves, Nodes, Module, Config, Instance, primitives}; +use frame_support::{StorageMap, StorageValue}; +#[cfg(not(feature = "std"))] +use sp_std::prelude::Vec; + +/// A marker type for runtime-specific storage implementation. +/// +/// Allows appending new items to the MMR and proof verification. +/// MMR nodes are appended to two different storages: +/// 1. We add nodes (leaves) hashes to the on-chain storge (see [crate::Nodes]). +/// 2. We add full leaves (and all inner nodes as well) into the `IndexingAPI` during block +/// processing, so the values end up in the Offchain DB if indexing is enabled. +pub struct RuntimeStorage; + +/// A marker type for offchain-specific storage implementation. +/// +/// Allows proof generation and verification, but does not support appending new items. +/// MMR nodes are assumed to be stored in the Off-Chain DB. Note this storage type +/// DOES NOT support adding new items to the MMR. +pub struct OffchainStorage; + +/// A storage layer for MMR. +/// +/// There are two different implementations depending on the use case. +/// See docs for [RuntimeStorage] and [OffchainStorage]. +pub struct Storage( + sp_std::marker::PhantomData<(StorageType, T, I, L)> +); + +impl Default for Storage { + fn default() -> Self { + Self(Default::default()) + } +} + +impl mmr_lib::MMRStore> for Storage where + T: Config, + I: Instance, + L: primitives::FullLeaf, +{ + fn get_elem(&self, pos: u64) -> mmr_lib::Result>> { + let key = Module::::offchain_key(pos); + // Retrieve the element from Off-chain DB. 
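The off-chain lookup in `get_elem` (its body continues just below) boils down to a key derived exactly like `Module::offchain_key` plus a persistent-storage read. A stand-alone sketch, assuming the `b"mmr-"` prefix used by the mock:

use codec::Encode;

fn read_raw_node(pos: u64) -> Option<sp_std::prelude::Vec<u8>> {
    // Same derivation as `Module::offchain_key`: (INDEXING_PREFIX, position).
    let key = (b"mmr-".as_ref(), pos).encode();
    // The value is the SCALE-encoded `Node`: full leaf data for leaves,
    // a bare hash for inner nodes; decode it with the runtime's leaf type.
    sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key)
}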
+ Ok( + sp_io::offchain ::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) + .and_then(|v| codec::Decode::decode(&mut &*v).ok()) + ) + } + + fn append(&mut self, _: u64, _: Vec>) -> mmr_lib::Result<()> { + panic!("MMR must not be altered in the off-chain context.") + } +} + +impl mmr_lib::MMRStore> for Storage where + T: Config, + I: Instance, + L: primitives::FullLeaf, +{ + fn get_elem(&self, pos: u64) -> mmr_lib::Result>> { + Ok(>::get(pos) + .map(Node::Hash) + ) + } + + fn append(&mut self, pos: u64, elems: Vec>) -> mmr_lib::Result<()> { + let mut leaves = crate::NumberOfLeaves::::get(); + let mut size = crate::mmr::utils::NodesUtils::new(leaves).size(); + if pos != size { + return Err(mmr_lib::Error::InconsistentStore); + } + + for elem in elems { + // on-chain we only store the hash (even if it's a leaf) + >::insert(size, elem.hash()); + // Indexing API is used to store the full leaf content. + elem.using_encoded(|elem| { + sp_io::offchain_index::set(&Module::::offchain_key(size), elem) + }); + size += 1; + + if let Node::Data(..) = elem { + leaves += 1; + } + } + + NumberOfLeaves::::put(leaves); + + Ok(()) + } +} diff --git a/frame/merkle-mountain-range/src/mmr/utils.rs b/frame/merkle-mountain-range/src/mmr/utils.rs new file mode 100644 index 0000000000000000000000000000000000000000..7a55605a64c9f13c1e8d78d6ff0d5e5c8495936e --- /dev/null +++ b/frame/merkle-mountain-range/src/mmr/utils.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Merkle Mountain Range utilities. + +/// MMR nodes & size -related utilities. +pub struct NodesUtils { + no_of_leaves: u64, +} + +impl NodesUtils { + /// Create new instance of MMR nodes utilities for given number of leaves. + pub fn new(no_of_leaves: u64) -> Self { + Self { no_of_leaves } + } + + /// Calculate number of peaks in the MMR. + pub fn number_of_peaks(&self) -> u64 { + self.number_of_leaves().count_ones() as u64 + } + + /// Return the number of leaves in the MMR. + pub fn number_of_leaves(&self) -> u64 { + self.no_of_leaves + } + + /// Calculate the total size of MMR (number of nodes). + pub fn size(&self) -> u64 { + 2 * self.no_of_leaves - self.number_of_peaks() + } + + /// Calculate maximal depth of the MMR. 
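A quick worked example of the helpers above and the `depth` helper defined just below: with 5 leaves (binary 101) there are two peaks, 2 * 5 - 2 = 8 nodes in total, and the maximal depth is 4. As a test-style sketch:

#[test]
fn nodes_utils_worked_example() {
    let utils = NodesUtils::new(5);
    assert_eq!(utils.number_of_peaks(), 2); // popcount(5) = 2
    assert_eq!(utils.size(), 8);            // 2 * 5 - 2
    assert_eq!(utils.depth(), 4);           // 64 - leading_zeros(next_power_of_two(5))
}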
+ pub fn depth(&self) -> u32 { + if self.no_of_leaves == 0 { + return 0 + } + + 64 - self.no_of_leaves + .next_power_of_two() + .leading_zeros() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_calculate_number_of_leaves_correctly() { + assert_eq!( + vec![0, 1, 2, 3, 4, 9, 15, 21] + .into_iter() + .map(|n| NodesUtils::new(n).depth()) + .collect::>(), + vec![0, 1, 2, 3, 3, 5, 5, 6] + ); + } + + #[test] + fn should_calculate_depth_correclty() { + assert_eq!( + vec![0, 1, 2, 3, 4, 9, 15, 21] + .into_iter() + .map(|n| NodesUtils::new(n).number_of_leaves()) + .collect::>(), + vec![0, 1, 2, 3, 4, 9, 15, 21] + ); + } + + #[test] + fn should_calculate_number_of_peaks_correctly() { + assert_eq!( + vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 21] + .into_iter() + .map(|n| NodesUtils::new(n).number_of_peaks()) + .collect::>(), + vec![0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 3] + ); + } + + #[test] + fn should_calculate_the_size_correctly() { + let _ = env_logger::try_init(); + + let leaves = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 21]; + let sizes = vec![0, 1, 3, 4, 7, 8, 10, 11, 15, 16, 18, 19, 22, 23, 25, 26, 39]; + assert_eq!( + leaves + .clone() + .into_iter() + .map(|n| NodesUtils::new(n).size()) + .collect::>(), + sizes.clone() + ); + + // size cross-check + let mut actual_sizes = vec![]; + for s in &leaves[1..] { + crate::tests::new_test_ext().execute_with(|| { + let mut mmr = crate::mmr::Mmr::< + crate::mmr::storage::RuntimeStorage, + crate::mock::Test, + crate::DefaultInstance, + _, + >::new(0); + for i in 0..*s { + mmr.push(i); + } + actual_sizes.push(mmr.size()); + }) + } + assert_eq!( + sizes[1..], + actual_sizes[..], + ); + } +} diff --git a/frame/merkle-mountain-range/src/mock.rs b/frame/merkle-mountain-range/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..c311d53446bb0c5f2ddf7d86bff9297052383415 --- /dev/null +++ b/frame/merkle-mountain-range/src/mock.rs @@ -0,0 +1,105 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use crate::primitives::{LeafDataProvider, Compact}; + +use codec::{Encode, Decode}; +use frame_support::{ + impl_outer_origin, parameter_types, +}; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{ + BlakeTwo256, Keccak256, IdentityLookup, + }, +}; +use sp_std::cell::RefCell; +use sp_std::prelude::*; + +impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} +} + +#[derive(Clone, Eq, PartialEq, Encode, Decode)] +pub struct Test; +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; +} +impl frame_system::Config for Test { + type BaseCallFilter = (); + type Origin = Origin; + type Call = (); + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = sp_core::sr25519::Public; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type DbWeight = (); + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); +} + +impl Config for Test { + const INDEXING_PREFIX: &'static [u8] = b"mmr-"; + + type Hashing = Keccak256; + type Hash = H256; + type LeafData = Compact, LeafData)>; + type OnNewRoot = (); + type WeightInfo = (); +} + +#[derive(Encode, Decode, Clone, Default, Eq, PartialEq, Debug)] +pub struct LeafData { + pub a: u64, + pub b: Vec, +} + +impl LeafData { + pub fn new(a: u64) -> Self { + Self { + a, + b: Default::default(), + } + } +} + +thread_local! { + pub static LEAF_DATA: RefCell = RefCell::new(Default::default()); +} + +impl LeafDataProvider for LeafData { + type LeafData = Self; + + fn leaf_data() -> Self::LeafData { + LEAF_DATA.with(|r| r.borrow().clone()) + } +} + +pub(crate) type MMR = Module; diff --git a/frame/merkle-mountain-range/src/primitives.rs b/frame/merkle-mountain-range/src/primitives.rs new file mode 100644 index 0000000000000000000000000000000000000000..cab4b6a0dc837723ad75e0c420ef23e3c915c78d --- /dev/null +++ b/frame/merkle-mountain-range/src/primitives.rs @@ -0,0 +1,415 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Merkle Mountain Range primitive types. + +use frame_support::RuntimeDebug; +use sp_runtime::traits; +use sp_std::fmt; +#[cfg(not(feature = "std"))] +use sp_std::prelude::Vec; + +/// A provider of the MMR's leaf data. +pub trait LeafDataProvider { + /// A type that should end up in the leaf of MMR. + type LeafData: FullLeaf; + + /// The method to return leaf data that should be placed + /// in the leaf node appended MMR at this block. + /// + /// This is being called by the `on_initialize` method of + /// this pallet at the very beginning of each block. + fn leaf_data() -> Self::LeafData; +} + +impl LeafDataProvider for () { + type LeafData = (); + + fn leaf_data() -> Self::LeafData { + () + } +} + +/// The most common use case for MMRs is to store historical block hashes, +/// so that any point in time in the future we can receive a proof about some past +/// blocks without using excessive on-chain storage. +/// Hence we implement the [LeafDataProvider] for [frame_system::Module], since the +/// current block hash is not available (since the block is not finished yet), +/// we use the `parent_hash` here. 
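Besides the `frame_system` implementation that follows, any type can act as a leaf provider. A hypothetical custom provider, modelled on the `LeafData` struct in `mock.rs`, could look like this (the derives are enough to satisfy the blanket `FullLeaf` implementation):

use codec::{Decode, Encode};

#[derive(Encode, Decode, Clone, PartialEq, Debug)]
pub struct MyLeaf {
    pub block_number: u32,
    pub payload: Vec<u8>,
}

impl LeafDataProvider for MyLeaf {
    type LeafData = Self;

    fn leaf_data() -> Self::LeafData {
        // A real provider would read runtime state here; a constant keeps the
        // sketch self-contained.
        MyLeaf { block_number: 0, payload: b"example".to_vec() }
    }
}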
+impl LeafDataProvider for frame_system::Module { + type LeafData = ::Hash; + + fn leaf_data() -> Self::LeafData { + Self::parent_hash() + } +} + +/// New MMR root notification hook. +pub trait OnNewRoot { + /// Function called by the pallet in case new MMR root has been computed. + fn on_new_root(root: &Hash); +} + +/// No-op implementation of [OnNewRoot]. +impl OnNewRoot for () { + fn on_new_root(_root: &Hash) {} +} + +/// A full leaf content stored in the offchain-db. +pub trait FullLeaf: Clone + PartialEq + fmt::Debug + codec::Decode { + /// Encode the leaf either in it's full or compact form. + /// + /// NOTE the encoding returned here MUST be `Decode`able into `FullLeaf`. + fn using_encoded R>(&self, f: F, compact: bool) -> R; +} + +impl FullLeaf for T { + fn using_encoded R>(&self, f: F, _compact: bool) -> R { + codec::Encode::using_encoded(self, f) + } +} + +/// An element representing either full data or it's hash. +/// +/// See [Compact] to see how it may be used in practice to reduce the size +/// of proofs in case multiple [LeafDataProvider]s are composed together. +/// This is also used internally by the MMR to differentiate leaf nodes (data) +/// and inner nodes (hashes). +/// +/// [DataOrHash::hash] method calculates the hash of this element in it's compact form, +/// so should be used instead of hashing the encoded form (which will always be non-compact). +#[derive(RuntimeDebug, Clone, PartialEq)] +pub enum DataOrHash { + /// Arbitrary data in it's full form. + Data(L), + /// A hash of some data. + Hash(H::Output), +} + +impl From for DataOrHash { + fn from(l: L) -> Self { + Self::Data(l) + } +} + +mod encoding { + use super::*; + + /// A helper type to implement [codec::Codec] for [DataOrHash]. + #[derive(codec::Encode, codec::Decode)] + enum Either { + Left(A), + Right(B), + } + + impl codec::Encode for DataOrHash { + fn encode_to(&self, dest: &mut T) { + match self { + Self::Data(l) => l.using_encoded( + |data| Either::<&[u8], &H::Output>::Left(data).encode_to(dest), false + ), + Self::Hash(h) => Either::<&[u8], &H::Output>::Right(h).encode_to(dest), + } + } + } + + impl codec::Decode for DataOrHash { + fn decode(value: &mut I) -> Result { + let decoded: Either, H::Output> = Either::decode(value)?; + Ok(match decoded { + Either::Left(l) => DataOrHash::Data(L::decode(&mut &*l)?), + Either::Right(r) => DataOrHash::Hash(r), + }) + } + } +} + +impl DataOrHash { + /// Retrieve a hash of this item. + /// + /// Depending on the node type it's going to either be a contained value for [DataOrHash::Hash] + /// node, or a hash of SCALE-encoded [DataOrHash::Data] data. + pub fn hash(&self) -> H::Output { + match *self { + Self::Data(ref leaf) => leaf.using_encoded(::hash, true), + Self::Hash(ref hash) => hash.clone(), + } + } +} + +/// A composition of multiple leaf elements with compact form representation. +/// +/// When composing together multiple [LeafDataProvider]s you will end up with +/// a tuple of `LeafData` that each element provides. +/// +/// However this will cause the leaves to have significant size, while for some +/// use cases it will be enough to prove only one element of the tuple. +/// That's the rationale for [Compact] struct. We wrap each element of the tuple +/// into [DataOrHash] and each tuple element is hashed first before constructing +/// the final hash of the entire tuple. This allows you to replace tuple elements +/// you don't care about with their hashes. 
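The pruning trick described above is exercised by the `compact_should_work` test later in this file: replacing one tuple element with its hash leaves the compact hash unchanged. A test-style illustration using the types defined here (Keccak256 and the string payloads are arbitrary choices; `Compact::hash` is the test-only helper defined below):

#[test]
fn pruned_tuple_element_keeps_the_same_hash() {
    use sp_runtime::traits::Keccak256;
    type Leaf = DataOrHash<Keccak256, String>;

    let proven = Leaf::Data("element we want to prove".into());
    let other = Leaf::Data("element we do not care about".into());

    let full: Compact<Keccak256, (Leaf, Leaf)> =
        Compact::new((proven.clone(), other.clone()));
    let pruned: Compact<Keccak256, (Leaf, Leaf)> =
        Compact::new((proven, Leaf::Hash(other.hash())));

    assert_eq!(full.hash(), pruned.hash());
}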
+#[derive(RuntimeDebug, Clone, PartialEq)] +pub struct Compact { + pub tuple: T, + _hash: sp_std::marker::PhantomData, +} + +impl sp_std::ops::Deref for Compact { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.tuple + } +} + +impl Compact { + pub fn new(tuple: T) -> Self { + Self { tuple, _hash: Default::default() } + } +} + +impl codec::Decode for Compact { + fn decode(value: &mut I) -> Result { + T::decode(value).map(Compact::new) + } +} + +macro_rules! impl_leaf_data_for_tuple { + ( $( $name:ident : $id:tt ),+ ) => { + /// [FullLeaf] implementation for `Compact, ...)>` + impl FullLeaf for Compact, )+ )> where + H: traits::Hash, + $( $name: FullLeaf ),+ + { + fn using_encoded R>(&self, f: F, compact: bool) -> R { + if compact { + codec::Encode::using_encoded(&( + $( DataOrHash::::Hash(self.tuple.$id.hash()), )+ + ), f) + } else { + codec::Encode::using_encoded(&self.tuple, f) + } + } + } + + /// [LeafDataProvider] implementation for `Compact, ...)>` + /// + /// This provides a compact-form encoding for tuples wrapped in [Compact]. + impl LeafDataProvider for Compact where + H: traits::Hash, + $( $name: LeafDataProvider ),+ + { + type LeafData = Compact< + H, + ( $( DataOrHash, )+ ), + >; + + fn leaf_data() -> Self::LeafData { + let tuple = ( + $( DataOrHash::Data($name::leaf_data()), )+ + ); + Compact::new(tuple) + } + } + + /// [LeafDataProvider] implementation for `(Tuple, ...)` + /// + /// This provides regular (non-compactable) composition of [LeafDataProvider]s. + impl<$( $name ),+> LeafDataProvider for ( $( $name, )+ ) where + ( $( $name::LeafData, )+ ): FullLeaf, + $( $name: LeafDataProvider ),+ + { + type LeafData = ( $( $name::LeafData, )+ ); + + fn leaf_data() -> Self::LeafData { + ( + $( $name::leaf_data(), )+ + ) + } + } + } +} + +/// Test functions implementation for `Compact, ...)>` +#[cfg(test)] +impl Compact, DataOrHash)> where + H: traits::Hash, + A: FullLeaf, + B: FullLeaf, +{ + /// Retrieve a hash of this item in it's compact form. + pub fn hash(&self) -> H::Output { + self.using_encoded(::hash, true) + } +} + +impl_leaf_data_for_tuple!(A:0); +impl_leaf_data_for_tuple!(A:0, B:1); +impl_leaf_data_for_tuple!(A:0, B:1, C:2); +impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3); +impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3, E:4); + +/// A MMR proof data for one of the leaves. +#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq)] +pub struct Proof { + /// The index of the leaf the proof is for. + pub leaf_index: u64, + /// Number of leaves in MMR, when the proof was generated. + pub leaf_count: u64, + /// Proof elements (hashes of siblings of inner nodes on the path to the leaf). 
+ pub items: Vec, +} + + +#[cfg(test)] +mod tests { + use super::*; + + use codec::Decode; + use crate::tests::hex; + use sp_runtime::traits::Keccak256; + + type Test = DataOrHash; + type TestCompact = Compact; + type TestProof = Proof<::Output>; + + #[test] + fn should_encode_decode_proof() { + // given + let proof: TestProof = Proof { + leaf_index: 5, + leaf_count: 10, + items: vec![ + hex("c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd"), + hex("d3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd"), + hex("e3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd"), + ], + }; + + // when + let encoded = codec::Encode::encode(&proof); + let decoded = TestProof::decode(&mut &*encoded); + + // then + assert_eq!(decoded, Ok(proof)); + } + + #[test] + fn should_encode_decode_correctly_if_no_compact() { + // given + let cases = vec![ + Test::Data("Hello World!".into()), + Test::Hash(hex("c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd")), + Test::Data("".into()), + Test::Data("3e48d6bcd417fb22e044747242451e2c0f3e602d1bcad2767c34808621956417".into()), + ]; + + // when + let encoded = cases + .iter() + .map(codec::Encode::encode) + .collect::>(); + + let decoded = encoded + .iter() + .map(|x| Test::decode(&mut &**x)) + .collect::>(); + + // then + assert_eq!(decoded, cases.into_iter().map(Result::<_, codec::Error>::Ok).collect::>()); + // check encoding correctness + assert_eq!(&encoded[0], &hex_literal::hex!("00343048656c6c6f20576f726c6421")); + assert_eq!( + encoded[1].as_slice(), + hex_literal::hex!( + "01c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd" + ).as_ref() + ); + } + + #[test] + fn should_return_the_hash_correctly() { + // given + let a = Test::Data("Hello World!".into()); + let b = Test::Hash(hex("c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd")); + + // when + let a = a.hash(); + let b = b.hash(); + + // then + assert_eq!(a, hex("a9c321be8c24ba4dc2bd73f5300bde67dc57228ab8b68b607bb4c39c5374fac9")); + assert_eq!(b, hex("c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd")); + } + + #[test] + fn compact_should_work() { + // given + let a = Test::Data("Hello World!".into()); + let b = Test::Data("".into()); + + // when + let c: TestCompact = Compact::new((a.clone(), b.clone())); + let d: TestCompact = Compact::new(( + Test::Hash(a.hash()), + Test::Hash(b.hash()), + )); + + // then + assert_eq!(c.hash(), d.hash()); + } + + #[test] + fn compact_should_encode_decode_correctly() { + // given + let a = Test::Data("Hello World!".into()); + let b = Test::Data("".into()); + + let c: TestCompact = Compact::new((a.clone(), b.clone())); + let d: TestCompact = Compact::new(( + Test::Hash(a.hash()), + Test::Hash(b.hash()), + )); + let cases = vec![c, d.clone()]; + + // when + let encoded_compact = cases + .iter() + .map(|c| c.using_encoded(|x| x.to_vec(), true)) + .collect::>(); + + let encoded = cases + .iter() + .map(|c| c.using_encoded(|x| x.to_vec(), false)) + .collect::>(); + + let decoded_compact = encoded_compact + .iter() + .map(|x| TestCompact::decode(&mut &**x)) + .collect::>(); + + let decoded = encoded + .iter() + .map(|x| TestCompact::decode(&mut &**x)) + .collect::>(); + + // then + assert_eq!(decoded, cases.into_iter().map(Result::<_, codec::Error>::Ok).collect::>()); + + assert_eq!(decoded_compact, vec![Ok(d.clone()), Ok(d.clone())]); + } +} diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs new file mode 100644 index 
0000000000000000000000000000000000000000..059ff6612f1b44379fbda902001ff5eb7503b52f --- /dev/null +++ b/frame/merkle-mountain-range/src/tests.rs @@ -0,0 +1,275 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use crate::mock::*; +use crate::primitives::{Proof, Compact}; + +use frame_support::traits::OnInitialize; +use sp_core::{ + H256, + offchain::{ + testing::TestOffchainExt, + OffchainExt, + }, +}; + +pub(crate) fn new_test_ext() -> sp_io::TestExternalities { + frame_system::GenesisConfig::default().build_storage::().unwrap().into() +} + +fn register_offchain_ext(ext: &mut sp_io::TestExternalities) { + let (offchain, _offchain_state) = TestOffchainExt::with_offchain_db(ext.offchain_db()); + ext.register_extension(OffchainExt::new(offchain)); +} + +fn new_block() -> u64 { + let number = frame_system::Module::::block_number() + 1; + let hash = H256::repeat_byte(number as u8); + LEAF_DATA.with(|r| r.borrow_mut().a = number); + + frame_system::Module::::initialize( + &number, + &hash, + &Default::default(), + &Default::default(), + frame_system::InitKind::Full, + ); + MMR::on_initialize(number) +} + +pub(crate) fn hex(s: &str) -> H256 { + s.parse().unwrap() +} + +fn decode_node(v: Vec) -> mmr::Node< + ::Hashing, + (H256, LeafData), +> { + use crate::primitives::DataOrHash; + type A = DataOrHash::<::Hashing, H256>; + type B = DataOrHash::<::Hashing, LeafData>; + type Node = mmr::Node<::Hashing, (A, B)>; + let tuple: Node = codec::Decode::decode(&mut &v[..]).unwrap(); + + match tuple { + mmr::Node::Data((DataOrHash::Data(a), DataOrHash::Data(b))) => mmr::Node::Data((a, b)), + mmr::Node::Hash(hash) => mmr::Node::Hash(hash), + _ => unreachable!(), + } +} + +fn init_chain(blocks: usize) { + // given + for _ in 0..blocks { + new_block(); + } +} + +#[test] +fn should_start_empty() { + let _ = env_logger::try_init(); + new_test_ext().execute_with(|| { + // given + assert_eq!( + crate::RootHash::::get(), + "0000000000000000000000000000000000000000000000000000000000000000".parse().unwrap() + ); + assert_eq!(crate::NumberOfLeaves::::get(), 0); + assert_eq!(crate::Nodes::::get(0), None); + + // when + let weight = new_block(); + + // then + assert_eq!(crate::NumberOfLeaves::::get(), 1); + assert_eq!(crate::Nodes::::get(0), + Some(hex("da5e6d0616e05c6a6348605a37ca33493fc1a15ad1e6a405ee05c17843fdafed"))); + assert_eq!( + crate::RootHash::::get(), + hex("da5e6d0616e05c6a6348605a37ca33493fc1a15ad1e6a405ee05c17843fdafed") + ); + assert!(weight != 0); + }); +} + +#[test] +fn should_append_to_mmr_when_on_initialize_is_called() { + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + ext.execute_with(|| { + // when + new_block(); + new_block(); + + // then + assert_eq!(crate::NumberOfLeaves::::get(), 2); + assert_eq!(crate::Nodes::::get(0), + Some(hex("da5e6d0616e05c6a6348605a37ca33493fc1a15ad1e6a405ee05c17843fdafed"))); + 
assert_eq!(crate::Nodes::::get(1), + Some(hex("ff5d891b28463a3440e1b650984685efdf260e482cb3807d53c49090841e755f"))); + assert_eq!(crate::Nodes::::get(2), + Some(hex("bc54778fab79f586f007bd408dca2c4aa07959b27d1f2c8f4f2549d1fcfac8f8"))); + assert_eq!(crate::Nodes::::get(3), None); + assert_eq!( + crate::RootHash::::get(), + hex("bc54778fab79f586f007bd408dca2c4aa07959b27d1f2c8f4f2549d1fcfac8f8") + ); + }); + + // make sure the leaves end up in the offchain DB + ext.persist_offchain_overlay(); + let offchain_db = ext.offchain_db(); + assert_eq!(offchain_db.get(&MMR::offchain_key(0)).map(decode_node), Some(mmr::Node::Data(( + H256::repeat_byte(1), + LeafData::new(1), + )))); + assert_eq!(offchain_db.get(&MMR::offchain_key(1)).map(decode_node), Some(mmr::Node::Data(( + H256::repeat_byte(2), + LeafData::new(2), + )))); + assert_eq!(offchain_db.get(&MMR::offchain_key(2)).map(decode_node), Some(mmr::Node::Hash( + hex("bc54778fab79f586f007bd408dca2c4aa07959b27d1f2c8f4f2549d1fcfac8f8") + ))); + assert_eq!(offchain_db.get(&MMR::offchain_key(3)), None); +} + +#[test] +fn should_construct_larger_mmr_correctly() { + let _ = env_logger::try_init(); + new_test_ext().execute_with(|| { + // when + init_chain(7); + + // then + assert_eq!(crate::NumberOfLeaves::::get(), 7); + assert_eq!(crate::Nodes::::get(0), + Some(hex("da5e6d0616e05c6a6348605a37ca33493fc1a15ad1e6a405ee05c17843fdafed"))); + assert_eq!(crate::Nodes::::get(10), + Some(hex("af3327deed0515c8d1902c9b5cd375942d42f388f3bfe3d1cd6e1b86f9cc456c"))); + assert_eq!( + crate::RootHash::::get(), + hex("fc4f9042bd2f73feb26f3fc42db834c5f1943fa20070ddf106c486a478a0d561") + ); + }); +} + +#[test] +fn should_generate_proofs_correctly() { + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + // given + ext.execute_with(|| init_chain(7)); + ext.persist_offchain_overlay(); + + // Try to generate proofs now. This requires the offchain extensions to be present + // to retrieve full leaf data. 
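The comment above is the key constraint these tests exercise: interior MMR nodes are kept on-chain (and in the off-chain index) as bare hashes, while full leaf data is only available from the off-chain database written via indexing, keyed by `MMR::offchain_key(pos)`. The test resumes below with `register_offchain_ext`. As a dependency-free illustration of that split, here is a minimal sketch using plain std types; `Node`, `OffchainIndex` and `leaf_data` are hypothetical stand-ins, not the pallet's `mmr::Node` or storage API.

```rust
// Standalone sketch only: models the on-/off-chain split the MMR tests rely on.
use std::collections::HashMap;

/// Simplified stand-in for `mmr::Node`: leaves keep full data, parents only a hash.
enum Node {
    Data(Vec<u8>),   // full leaf payload, only recoverable from the off-chain store
    Hash([u8; 32]),  // interior node: a hash is enough for proof verification
}

/// Hypothetical off-chain index keyed by node position, mirroring `MMR::offchain_key(pos)`.
struct OffchainIndex {
    db: HashMap<u64, Node>,
}

impl OffchainIndex {
    fn new() -> Self {
        Self { db: HashMap::new() }
    }

    fn insert(&mut self, pos: u64, node: Node) {
        self.db.insert(pos, node);
    }

    /// Proof generation needs the *data* of a leaf; a bare hash is not enough,
    /// which is why the tests register the offchain extension first.
    fn leaf_data(&self, pos: u64) -> Option<&[u8]> {
        match self.db.get(&pos) {
            Some(Node::Data(bytes)) => Some(bytes.as_slice()),
            _ => None,
        }
    }
}

fn main() {
    let mut index = OffchainIndex::new();
    index.insert(0, Node::Data(b"leaf 1".to_vec()));
    index.insert(1, Node::Data(b"leaf 2".to_vec()));
    index.insert(2, Node::Hash([0xbc; 32])); // parent of leaves 0 and 1: hash only

    // Leaves are recoverable, the parent is not.
    assert!(index.leaf_data(0).is_some());
    assert!(index.leaf_data(2).is_none());
}
```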
+ register_offchain_ext(&mut ext); + ext.execute_with(|| { + // when generate proofs for all leaves + let proofs = (0_u64..crate::NumberOfLeaves::::get()) + .into_iter() + .map(|leaf_index| crate::Module::::generate_proof(leaf_index).unwrap()) + .collect::>(); + + // then + assert_eq!(proofs[0], (Compact::new(( + H256::repeat_byte(1).into(), + LeafData::new(1).into(), + )), Proof { + leaf_index: 0, + leaf_count: 7, + items: vec![ + hex("ff5d891b28463a3440e1b650984685efdf260e482cb3807d53c49090841e755f"), + hex("00b0046bd2d63fcb760cf50a262448bb2bbf9a264b0b0950d8744044edf00dc3"), + hex("16de0900b57bf359a0733674ebfbba0f494e95a8391b4bfeae850019399f3ec0"), + ], + })); + assert_eq!(proofs[4], (Compact::new(( + H256::repeat_byte(5).into(), + LeafData::new(5).into(), + )), Proof { + leaf_index: 4, + leaf_count: 7, + items: vec![ + hex("e53ee36ba6c068b1a6cfef7862fed5005df55615e1c9fa6eeefe08329ac4b94b"), + hex("c09d4a008a0f1ef37860bef33ec3088ccd94268c0bfba7ff1b3c2a1075b0eb92"), + hex("af3327deed0515c8d1902c9b5cd375942d42f388f3bfe3d1cd6e1b86f9cc456c"), + ], + })); + assert_eq!(proofs[6], (Compact::new(( + H256::repeat_byte(7).into(), + LeafData::new(7).into(), + )), Proof { + leaf_index: 6, + leaf_count: 7, + items: vec![ + hex("e53ee36ba6c068b1a6cfef7862fed5005df55615e1c9fa6eeefe08329ac4b94b"), + hex("dad09f50b41822fc5ecadc25b08c3a61531d4d60e962a5aa0b6998fad5c37c5e"), + ], + })); + }); +} + +#[test] +fn should_verify() { + let _ = env_logger::try_init(); + + // Start off with chain initialisation and storing indexing data off-chain + // (MMR Leafs) + let mut ext = new_test_ext(); + ext.execute_with(|| init_chain(7)); + ext.persist_offchain_overlay(); + + // Try to generate proof now. This requires the offchain extensions to be present + // to retrieve full leaf data. + register_offchain_ext(&mut ext); + let (leaf, proof5) = ext.execute_with(|| { + // when + crate::Module::::generate_proof(5).unwrap() + }); + + // Now to verify the proof, we really shouldn't require offchain storage or extension. + // Hence we initialize the storage once again, using different externalities and then + // verify. + let mut ext2 = new_test_ext(); + ext2.execute_with(|| { + init_chain(7); + // then + assert_eq!(crate::Module::::verify_leaf(leaf, proof5), Ok(())); + }); +} + +#[test] +fn should_verify_on_the_next_block_since_there_is_no_pruning_yet() { + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + // given + ext.execute_with(|| init_chain(7)); + + ext.persist_offchain_overlay(); + register_offchain_ext(&mut ext); + + ext.execute_with(|| { + // when + let (leaf, proof5) = crate::Module::::generate_proof(5).unwrap(); + new_block(); + + // then + assert_eq!(crate::Module::::verify_leaf(leaf, proof5), Ok(())); + }); +} diff --git a/frame/multisig/README.md b/frame/multisig/README.md index 2209e876f844123bedab34e39f897b32fc5e3c0b..a18ef74163d098b05e86f7fc320d04c7cb5ef395 100644 --- a/frame/multisig/README.md +++ b/frame/multisig/README.md @@ -24,6 +24,6 @@ not available or desired. * `cancel_as_multi` - Cancel a call from a composite origin. 
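The dispatchables listed above all operate on a derived composite account: the benchmarking and `impl Module` code further down sorts the signatories and calls `Multisig::multi_account_id(&signatories, threshold)`. The sketch below shows the idea behind that derivation with std types only; the string prefix, `DefaultHasher` and the `u64` "account id" are stand-ins, whereas the real pallet hashes a SCALE-encoded (prefix, signatories, threshold) tuple.

```rust
// Standalone sketch of a deterministic "multi account" id derived from the
// sorted signatory set plus the threshold. Not the pallet's actual hashing.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn multi_account_id(signatories: &[u64], threshold: u16) -> u64 {
    let mut sorted = signatories.to_vec();
    sorted.sort(); // any permutation of the same set must map to the same account
    let mut hasher = DefaultHasher::new();
    ("multisig", &sorted, threshold).hash(&mut hasher); // prefix is illustrative only
    hasher.finish()
}

fn main() {
    // Same set, same threshold: one shared account, regardless of ordering.
    assert_eq!(multi_account_id(&[3, 1, 2], 2), multi_account_id(&[1, 2, 3], 2));
    // A different threshold addresses a different composite account.
    assert_ne!(multi_account_id(&[1, 2, 3], 2), multi_account_id(&[1, 2, 3], 3));
}
```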
[`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index bf89ec8b09bd4772a144444b9bd5d52558603c48..0b549b3d94717c3227a8725d03b83aba5e6beca2 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -29,7 +29,7 @@ use crate::Module as Multisig; const SEED: u32 = 0; -fn setup_multi(s: u32, z: u32) +fn setup_multi(s: u32, z: u32) -> Result<(Vec, Vec), &'static str> { let mut signatories: Vec = Vec::new(); @@ -42,7 +42,7 @@ fn setup_multi(s: u32, z: u32) } signatories.sort(); // Must first convert to outer call type. - let call: ::Call = frame_system::Call::::remark(vec![0; z as usize]).into(); + let call: ::Call = frame_system::Call::::remark(vec![0; z as usize]).into(); let call_data = call.encode(); return Ok((signatories, call_data)) } @@ -55,7 +55,7 @@ benchmarks! { let z in 0 .. 10_000; let max_signatories = T::MaxSignatories::get().into(); let (mut signatories, _) = setup_multi::(max_signatories, z)?; - let call: ::Call = frame_system::Call::::remark(vec![0; z as usize]).into(); + let call: ::Call = frame_system::Call::::remark(vec![0; z as usize]).into(); let call_hash = call.using_encoded(blake2_256); let multi_account_id = Multisig::::multi_account_id(&signatories, 1); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index 873508259a8d7c73549b79e09c5adc099df03fd5..b39b979f999d03fb72d564e82d1a8a125db9bd7c 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -18,7 +18,7 @@ //! # Multisig Module //! A module for doing multisig dispatch. //! -//! - [`multisig::Trait`](./trait.Trait.html) +//! - [`multisig::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -41,7 +41,7 @@ //! * `cancel_as_multi` - Cancel a call from a composite origin. //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! [`Config`]: ./trait.Config.html // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -62,14 +62,14 @@ use frame_system::{self as system, ensure_signed, RawOrigin}; use sp_runtime::{DispatchError, DispatchResult, traits::{Dispatchable, Zero}}; pub use weights::WeightInfo; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// Just a bunch of bytes, but they should decode to a valid `Call`. pub type OpaqueCall = Vec; /// Configuration trait. -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The overarching call type. type Call: Parameter + Dispatchable @@ -123,7 +123,7 @@ pub struct Multisig { } decl_storage! { - trait Store for Module as Multisig { + trait Store for Module as Multisig { /// The set of open multisig operations. pub Multisigs: double_map hasher(twox_64_concat) T::AccountId, hasher(blake2_128_concat) [u8; 32] @@ -134,7 +134,7 @@ decl_storage! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Threshold must be 2 or greater. MinimumThreshold, /// Call is already approved by this signatory. @@ -169,8 +169,8 @@ decl_error! { decl_event! { /// Events type. 
pub enum Event where - AccountId = ::AccountId, - BlockNumber = ::BlockNumber, + AccountId = ::AccountId, + BlockNumber = ::BlockNumber, CallHash = [u8; 32] { /// A new multisig operation has begun. \[approving, multisig, call_hash\] @@ -191,7 +191,7 @@ enum CallOrHash { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; /// Deposit one of this module's events by using the default implementation. @@ -232,7 +232,7 @@ decl_module! { )] fn as_multi_threshold_1(origin, other_signatories: Vec, - call: Box<::Call>, + call: Box<::Call>, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let max_sigs = T::MaxSignatories::get() as usize; @@ -443,7 +443,7 @@ decl_module! { } } -impl Module { +impl Module { /// Derive a multi-account ID from the sorted list of accounts and the threshold that are /// required. /// @@ -615,7 +615,7 @@ impl Module { } /// Attempt to decode and return the call, provided by the user or from storage. - fn get_call(hash: &[u8; 32], maybe_known: Option<&[u8]>) -> Option<(::Call, usize)> { + fn get_call(hash: &[u8; 32], maybe_known: Option<&[u8]>) -> Option<(::Call, usize)> { maybe_known.map_or_else(|| { Calls::::get(hash).and_then(|(data, ..)| { Decode::decode(&mut &data[..]).ok().map(|d| (d, data.len())) diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index ca15e04597eaafff1779107183ac974012c68040..7a959ec37f283c0cfe3a31c2fa4cdaf90debb59b 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -23,10 +23,10 @@ use super::*; use frame_support::{ assert_ok, assert_noop, impl_outer_origin, parameter_types, impl_outer_dispatch, - weights::Weight, impl_outer_event, traits::Filter, + impl_outer_event, traits::Filter, }; use sp_core::H256; -use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; +use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; use crate as multisig; impl_outer_origin! { @@ -55,12 +55,14 @@ impl_outer_dispatch! { pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = TestBaseCallFilter; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -72,13 +74,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = TestEvent; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -89,7 +84,7 @@ impl frame_system::Trait for Test { parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type Event = TestEvent; @@ -114,7 +109,7 @@ impl Filter for TestBaseCallFilter { } } } -impl Trait for Test { +impl Config for Test { type Event = TestEvent; type Call = Call; type Currency = Balances; diff --git a/frame/multisig/src/weights.rs b/frame/multisig/src/weights.rs index ab55b181f5a5014e2f2c954c823977c32f1915de..c0f6399e76420510ae0c4a535d6f2db60266d453 100644 --- a/frame/multisig/src/weights.rs +++ b/frame/multisig/src/weights.rs @@ -58,7 +58,7 @@ pub trait WeightInfo { /// Weights for pallet_multisig using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn as_multi_threshold_1(z: u32, ) -> Weight { (14_183_000 as Weight) .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) diff --git a/frame/nicks/README.md b/frame/nicks/README.md index b4c88eff43152162ba68ca08d7b794617d8b9c24..766108470bedf14b4bae95f8c35bc6f1558f1baa 100644 --- a/frame/nicks/README.md +++ b/frame/nicks/README.md @@ -20,6 +20,6 @@ have not been designed to be economically secure. Do not use this pallet as-is i * `kill_name` - Forcibly remove the associated name; the deposit is lost. [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index ddeadfb7680fef1b27217f5e65bc394c808acdf8..2b74f323d872c8ca8dd440e9ca1c481dc0760d38 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -17,7 +17,7 @@ //! # Nicks Module //! -//! - [`nicks::Trait`](./trait.Trait.html) +//! - [`nicks::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -37,7 +37,7 @@ //! * `kill_name` - Forcibly remove the associated name; the deposit is lost. //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! [`Config`]: ./trait.Config.html #![cfg_attr(not(feature = "std"), no_std)] @@ -51,12 +51,12 @@ use frame_support::{ }; use frame_system::ensure_signed; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The currency trait. type Currency: ReservableCurrency; @@ -78,14 +78,14 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as Nicks { + trait Store for Module as Nicks { /// The lookup table for names. NameOf: map hasher(twox_64_concat) T::AccountId => Option<(Vec, BalanceOf)>; } } decl_event!( - pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { + pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { /// A name was set. \[who\] NameSet(AccountId), /// A name was forcibly set. \[target\] @@ -101,7 +101,7 @@ decl_event!( decl_error! { /// Error for the nicks module. - pub enum Error for Module { + pub enum Error for Module { /// A name is too short. TooShort, /// A name is too long. @@ -113,7 +113,7 @@ decl_error! { decl_module! 
{ /// Nicks module declaration. - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; @@ -241,13 +241,13 @@ mod tests { use super::*; use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, + assert_ok, assert_noop, impl_outer_origin, parameter_types, ord_parameter_types }; use sp_core::H256; use frame_system::EnsureSignedBy; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, + testing::Header, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, }; impl_outer_origin! { @@ -258,12 +258,14 @@ mod tests { pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -275,13 +277,6 @@ mod tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -292,7 +287,7 @@ mod tests { parameter_types! { pub const ExistentialDeposit: u64 = 1; } - impl pallet_balances::Trait for Test { + impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type Event = (); @@ -309,7 +304,7 @@ mod tests { ord_parameter_types! { pub const One: u64 = 1; } - impl Trait for Test { + impl Config for Test { type Event = (); type Currency = Balances; type ReservationFee = ReservationFee; diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs index 91f89ad1d91001355e6bd4ac7afa0194ed8fc2b5..9641bea116a028e262840d5e2585ecee2e2c30b3 100644 --- a/frame/node-authorization/src/lib.rs +++ b/frame/node-authorization/src/lib.rs @@ -76,9 +76,9 @@ impl WeightInfo for () { fn remove_connections() -> Weight { 50_000_000 } } -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The event type of this module. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The maximum number of well known nodes that are allowed to set type MaxWellKnownNodes: Get; @@ -103,7 +103,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as NodeAuthorization { + trait Store for Module as NodeAuthorization { /// The set of well known nodes. This is stored sorted (just by value). pub WellKnownNodes get(fn well_known_nodes): BTreeSet; /// A map that maintains the ownership of each node. @@ -123,7 +123,7 @@ decl_storage! { decl_event! { pub enum Event where - ::AccountId, + ::AccountId, { /// The given well known node was added. NodeAdded(PeerId, AccountId), @@ -149,7 +149,7 @@ decl_event! { decl_error! { /// Error for the node authorization module. 
- pub enum Error for Module { + pub enum Error for Module { /// The PeerId is too long. PeerIdTooLong, /// Too many well known nodes. @@ -170,7 +170,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// The maximum number of authorized well known nodes const MaxWellKnownNodes: u32 = T::MaxWellKnownNodes::get(); @@ -267,7 +267,7 @@ decl_module! { pub fn reset_well_known_nodes(origin, nodes: Vec<(PeerId, T::AccountId)>) { T::ResetOrigin::ensure_origin(origin)?; ensure!(nodes.len() < T::MaxWellKnownNodes::get() as usize, Error::::TooManyNodes); - + Self::initialize_nodes(&nodes); Self::deposit_event(RawEvent::NodesReset(nodes)); @@ -280,7 +280,7 @@ decl_module! { #[weight = T::WeightInfo::claim_node()] pub fn claim_node(origin, node: PeerId) { let sender = ensure_signed(origin)?; - + ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); ensure!(!Owners::::contains_key(&node),Error::::AlreadyClaimed); @@ -403,7 +403,7 @@ decl_module! { } } -impl Module { +impl Module { fn initialize_nodes(nodes: &Vec<(PeerId, T::AccountId)>) { let peer_ids = nodes.iter() .map(|item| item.0.clone()) @@ -433,12 +433,12 @@ mod tests { use super::*; use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, weights::Weight, + assert_ok, assert_noop, impl_outer_origin, parameter_types, ord_parameter_types, }; use frame_system::EnsureSignedBy; use sp_core::H256; - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, testing::Header}; + use sp_runtime::{traits::{BlakeTwo256, IdentityLookup, BadOrigin}, testing::Header}; impl_outer_origin! { pub enum Origin for Test where system = frame_system {} @@ -449,12 +449,12 @@ mod tests { parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type DbWeight = (); + type BlockWeights = (); + type BlockLength = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -466,13 +466,6 @@ mod tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = (); @@ -491,7 +484,7 @@ mod tests { pub const MaxWellKnownNodes: u32 = 4; pub const MaxPeerIdLength: u32 = 2; } - impl Trait for Test { + impl Config for Test { type Event = (); type MaxWellKnownNodes = MaxWellKnownNodes; type MaxPeerIdLength = MaxPeerIdLength; diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index 47055eab73d4a30ac99a0940dedc21400489fbd0..1d133c1b613bc4aed0f9f4f2da847332689c58eb 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -24,22 +24,22 @@ mod mock; use sp_std::prelude::*; use sp_std::vec; -use frame_system::{RawOrigin, Module as System, Trait as SystemTrait}; +use frame_system::{RawOrigin, Module as System, Config as SystemConfig}; use frame_benchmarking::{benchmarks, account}; use frame_support::traits::{Currency, OnInitialize}; use sp_runtime::{Perbill, traits::{Convert, StaticLookup, Saturating, UniqueSaturatedInto}}; use sp_staking::offence::{ReportOffence, Offence, OffenceDetails}; -use pallet_balances::{Trait as BalancesTrait}; +use pallet_balances::{Config as BalancesConfig}; use pallet_babe::BabeEquivocationOffence; use pallet_grandpa::{GrandpaEquivocationOffence, GrandpaTimeSlot}; -use pallet_im_online::{Trait as ImOnlineTrait, Module as ImOnline, UnresponsivenessOffence}; -use pallet_offences::{Trait as OffencesTrait, Module as Offences}; -use pallet_session::historical::{Trait as HistoricalTrait, IdentificationTuple}; -use pallet_session::{Trait as SessionTrait, SessionManager}; +use pallet_im_online::{Config as ImOnlineConfig, Module as ImOnline, UnresponsivenessOffence}; +use pallet_offences::{Config as OffencesConfig, Module as Offences}; +use pallet_session::historical::{Config as HistoricalConfig, IdentificationTuple}; +use pallet_session::{Config as SessionConfig, SessionManager}; use pallet_staking::{ - Module as Staking, Trait as StakingTrait, RewardDestination, ValidatorPrefs, + Module as Staking, Config as StakingConfig, RewardDestination, ValidatorPrefs, Exposure, IndividualExposure, ElectionStatus, MAX_NOMINATIONS, Event as StakingEvent }; @@ -50,47 +50,47 @@ const MAX_OFFENDERS: u32 = 100; const MAX_NOMINATORS: u32 = 100; const MAX_DEFERRED_OFFENCES: u32 = 100; -pub struct Module(Offences); +pub struct Module(Offences); -pub trait Trait: - SessionTrait - + StakingTrait - + OffencesTrait - + ImOnlineTrait - + HistoricalTrait - + BalancesTrait +pub trait Config: + SessionConfig + + StakingConfig + + OffencesConfig + + ImOnlineConfig + + HistoricalConfig + + BalancesConfig + IdTupleConvert {} /// A helper trait to make sure we can convert `IdentificationTuple` coming from 
historical /// and the one required by offences. -pub trait IdTupleConvert { +pub trait IdTupleConvert { /// Convert identification tuple from `historical` trait to the one expected by `offences`. - fn convert(id: IdentificationTuple) -> ::IdentificationTuple; + fn convert(id: IdentificationTuple) -> ::IdentificationTuple; } -impl IdTupleConvert for T where - ::IdentificationTuple: From> +impl IdTupleConvert for T where + ::IdentificationTuple: From> { - fn convert(id: IdentificationTuple) -> ::IdentificationTuple { + fn convert(id: IdentificationTuple) -> ::IdentificationTuple { id.into() } } -type LookupSourceOf = <::Lookup as StaticLookup>::Source; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type LookupSourceOf = <::Lookup as StaticLookup>::Source; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -struct Offender { +struct Offender { pub controller: T::AccountId, pub stash: T::AccountId, pub nominator_stashes: Vec, } -fn bond_amount() -> BalanceOf { +fn bond_amount() -> BalanceOf { T::Currency::minimum_balance().saturating_mul(10_000u32.into()) } -fn create_offender(n: u32, nominators: u32) -> Result, &'static str> { +fn create_offender(n: u32, nominators: u32) -> Result, &'static str> { let stash: T::AccountId = account("stash", n, SEED); let controller: T::AccountId = account("controller", n, SEED); let controller_lookup: LookupSourceOf = T::Lookup::unlookup(controller.clone()); @@ -149,7 +149,7 @@ fn create_offender(n: u32, nominators: u32) -> Result, &'s Ok(Offender { controller, stash, nominator_stashes }) } -fn make_offenders(num_offenders: u32, num_nominators: u32) -> Result< +fn make_offenders(num_offenders: u32, num_nominators: u32) -> Result< (Vec>, Vec>), &'static str > { @@ -165,10 +165,10 @@ fn make_offenders(num_offenders: u32, num_nominators: u32) -> Result< let id_tuples = offenders.iter() .map(|offender| - ::ValidatorIdOf::convert(offender.controller.clone()) + ::ValidatorIdOf::convert(offender.controller.clone()) .expect("failed to get validator id from account id")) .map(|validator_id| - ::FullIdentificationOf::convert(validator_id.clone()) + ::FullIdentificationOf::convert(validator_id.clone()) .map(|full_id| (validator_id, full_id)) .expect("failed to convert validator id to full identification")) .collect::>>(); @@ -176,7 +176,7 @@ fn make_offenders(num_offenders: u32, num_nominators: u32) -> Result< } #[cfg(test)] -fn check_events::Event>>(expected: I) { +fn check_events::Event>>(expected: I) { let events = System::::events() .into_iter() .map(|frame_system::EventRecord { event, .. }| event).collect::>(); let expected = expected.collect::>(); @@ -235,7 +235,7 @@ benchmarks! { }; assert_eq!(System::::event_count(), 0); }: { - let _ = ::ReportUnresponsiveness::report_offence( + let _ = ::ReportUnresponsiveness::report_offence( reporters.clone(), offence ); @@ -250,14 +250,14 @@ benchmarks! { .flat_map(|offender| { core::iter::once(offender.stash).chain(offender.nominator_stashes.into_iter()) }) - .map(|stash| ::Event::from( + .map(|stash| ::Event::from( StakingEvent::::Slash(stash, BalanceOf::::from(slash_amount)) )) .collect::>(); let reward_events = reporters.into_iter() .flat_map(|reporter| vec![ frame_system::Event::::NewAccount(reporter.clone()).into(), - ::Event::from( + ::Event::from( pallet_balances::Event::::Endowed(reporter, (reward_amount / r).into()) ).into() ]); @@ -272,7 +272,7 @@ benchmarks! 
{ .chain(slash_events.into_iter().map(Into::into)) .chain(reward_events) .chain(slash_rest.into_iter().map(Into::into)) - .chain(std::iter::once(::Event::from( + .chain(std::iter::once(::Event::from( pallet_offences::Event::Offence( UnresponsivenessOffence::::ID, 0_u32.to_le_bytes().to_vec(), diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 527e0ede81ab98caa31d2c3728763ba8f0bd0e42..e55d7ac8e3a76f4800b26cf2c1ca25a73fba835c 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -37,11 +37,15 @@ type BlockNumber = u64; type Balance = u64; parameter_types! { - pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; @@ -53,24 +57,17 @@ impl frame_system::Trait for Test { type Header = sp_runtime::testing::Header; type Event = Event; type BlockHashCount = (); - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = (); - type MaximumBlockLength = (); type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (Balances,); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = (); type SystemWeightInfo = (); } parameter_types! { pub const ExistentialDeposit: Balance = 10; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = Balance; type Event = Event; @@ -83,13 +80,13 @@ impl pallet_balances::Trait for Test { parameter_types! { pub const MinimumPeriod: u64 = 5; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -120,7 +117,7 @@ parameter_types! { pub const Offset: u64 = 0; } -impl pallet_session::Trait for Test { +impl pallet_session::Config for Test { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type Keys = SessionKeys; type ShouldEndSession = pallet_session::PeriodicSessions; @@ -149,7 +146,7 @@ parameter_types! { pub type Extrinsic = sp_runtime::testing::TestXt; -impl pallet_staking::Trait for Test { +impl pallet_staking::Config for Test { type Currency = Balances; type UnixTime = pallet_timestamp::Module; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; @@ -174,7 +171,7 @@ impl pallet_staking::Trait for Test { type WeightInfo = (); } -impl pallet_im_online::Trait for Test { +impl pallet_im_online::Config for Test { type AuthorityId = UintAuthorityId; type Event = Event; type SessionDuration = Period; @@ -184,10 +181,10 @@ impl pallet_im_online::Trait for Test { } parameter_types! 
{ - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * BlockWeights::get().max_block; } -impl pallet_offences::Trait for Test { +impl pallet_offences::Config for Test { type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; @@ -199,7 +196,7 @@ impl frame_system::offchain::SendTransactionTypes for Test where Call: Fro type OverarchingCall = Call; } -impl crate::Trait for Test {} +impl crate::Config for Test {} pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index e72498273cec014619780f8eb702cd535f3a5c8f..e3f01823c18fcd27871f93bcfec2c646d15a595a 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -42,11 +42,11 @@ use codec::{Encode, Decode}; type OpaqueTimeSlot = Vec; /// A type alias for a report identifier. -type ReportIdOf = ::Hash; +type ReportIdOf = ::Hash; /// Type of data stored as a deferred offence pub type DeferredOffenceOf = ( - Vec::AccountId, ::IdentificationTuple>>, + Vec::AccountId, ::IdentificationTuple>>, Vec, SessionIndex, ); @@ -66,9 +66,9 @@ impl WeightInfo for () { } /// Offences trait -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From + Into<::Event>; + type Event: From + Into<::Event>; /// Full identification of the validator. type IdentificationTuple: Parameter + Ord; /// A handler called for every offence report. @@ -80,7 +80,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as Offences { + trait Store for Module as Offences { /// The primary structure that holds all offence records keyed by report identifiers. Reports get(fn reports): map hasher(twox_64_concat) ReportIdOf @@ -116,7 +116,7 @@ decl_event!( ); decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { fn deposit_event() = default; fn on_initialize(now: T::BlockNumber) -> Weight { @@ -158,7 +158,7 @@ decl_module! { } } -impl> +impl> ReportOffence for Module where T::IdentificationTuple: Clone, @@ -210,7 +210,7 @@ where } } -impl Module { +impl Module { /// Tries (without checking) to report an offence. Stores them in [`DeferredOffences`] in case /// it fails. Returns false in case it has to store the offence. fn report_or_store_offence( @@ -293,7 +293,7 @@ impl Module { } } -struct TriageOutcome { +struct TriageOutcome { /// Other reports for the same report kinds. concurrent_offenders: Vec>, } @@ -304,13 +304,13 @@ struct TriageOutcome { /// This struct is responsible for aggregating storage writes and the underlying storage should not /// accessed directly meanwhile. #[must_use = "The changes are not saved without called `save`"] -struct ReportIndexStorage> { +struct ReportIndexStorage> { opaque_time_slot: OpaqueTimeSlot, concurrent_reports: Vec>, same_kind_reports: Vec<(O::TimeSlot, ReportIdOf)>, } -impl> ReportIndexStorage { +impl> ReportIndexStorage { /// Preload indexes from the storage for the specific `time_slot` and the kind of the offence. 
fn load(time_slot: &O::TimeSlot) -> Self { let opaque_time_slot = time_slot.encode(); diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index 58ee97a9bcbb51465a8d464a4a0b12a2ce38c5b0..124b0030294046cb134a017b8094aad2217658ef 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -20,7 +20,7 @@ #![cfg(test)] use std::cell::RefCell; -use crate::{Module, Trait}; +use crate::{Module, Config}; use codec::Encode; use sp_runtime::Perbill; use sp_staking::{ @@ -91,12 +91,14 @@ pub fn set_offence_weight(new: Weight) { pub struct Runtime; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); } -impl frame_system::Trait for Runtime { +impl frame_system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = RocksDbWeight; type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -108,13 +110,6 @@ impl frame_system::Trait for Runtime { type Header = Header; type Event = TestEvent; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = RocksDbWeight; - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = (); @@ -124,10 +119,11 @@ impl frame_system::Trait for Runtime { } parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub OffencesWeightSoftLimit: Weight = + Perbill::from_percent(60) * BlockWeights::get().max_block; } -impl Trait for Runtime { +impl Config for Runtime { type Event = TestEvent; type IdentificationTuple = u64; type OnOffenceHandler = OnOffenceHandler; diff --git a/frame/offences/src/tests.rs b/frame/offences/src/tests.rs index ca9f46a198820ba93a2618803fab956f610636f2..18582ec042ca82b943d60220a88f8f40aa7ac831 100644 --- a/frame/offences/src/tests.rs +++ b/frame/offences/src/tests.rs @@ -342,7 +342,7 @@ fn weight_soft_limit_is_used() { new_test_ext().execute_with(|| { set_can_report(false); // Only 2 can fit in one block - set_offence_weight(::WeightSoftLimit::get() / 2); + set_offence_weight(::WeightSoftLimit::get() / 2); // Queue 3 offences // #1 diff --git a/frame/proxy/README.md b/frame/proxy/README.md index 26969db638289fbd7bdc3171460cb01e7f7bf850..20c4d2bf20b8251e665a71595e3de2428e3766d3 100644 --- a/frame/proxy/README.md +++ b/frame/proxy/README.md @@ -16,6 +16,6 @@ reject the announcement and in doing so, veto the execution. 
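The announce/delay/veto flow described above can be sketched independently of the pallet. The `Announcement` fields mirror the `Announcement { real, call_hash, height }` struct that appears in `lib.rs` below; the `u64` accounts, the `Delegate` holder and the method names are hypothetical stand-ins used only to show the bookkeeping.

```rust
// Standalone sketch of delayed-proxy announcement bookkeeping.
struct Announcement {
    real: u64,           // account being proxied
    call_hash: [u8; 32], // hash of the call announced ahead of time
    height: u32,         // block at which the announcement was made
}

struct Delegate {
    announcements: Vec<Announcement>,
}

impl Delegate {
    /// The real account can veto by removing the matching announcement.
    fn reject(&mut self, real: u64, call_hash: [u8; 32]) {
        self.announcements
            .retain(|a| !(a.real == real && a.call_hash == call_hash));
    }

    /// Execution is allowed only if the announcement survived the delay window.
    fn may_execute(&self, real: u64, call_hash: [u8; 32], now: u32, delay: u32) -> bool {
        self.announcements
            .iter()
            .any(|a| a.real == real && a.call_hash == call_hash && now >= a.height + delay)
    }
}

fn main() {
    let ann = Announcement { real: 1, call_hash: [7u8; 32], height: 10 };
    let mut delegate = Delegate { announcements: vec![ann] };

    // Too early: the delay has not elapsed yet.
    assert!(!delegate.may_execute(1, [7u8; 32], 11, 5));
    // After the delay the call may go through...
    assert!(delegate.may_execute(1, [7u8; 32], 15, 5));
    // ...unless the real account rejected the announcement first.
    delegate.reject(1, [7u8; 32]);
    assert!(!delegate.may_execute(1, [7u8; 32], 15, 5));
}
```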
### Dispatchable Functions [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index 5f1d79741dd8e02cec922401b2be94c128a3eb61..ac0fa52c9707096133188f10f75feb10c3888612 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -27,15 +27,15 @@ use crate::Module as Proxy; const SEED: u32 = 0; -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::Event) { let events = frame_system::Module::::events(); - let system_event: ::Event = generic_event.into(); + let system_event: ::Event = generic_event.into(); // compare to the last event record let EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); } -fn add_proxies(n: u32, maybe_who: Option) -> Result<(), &'static str> { +fn add_proxies(n: u32, maybe_who: Option) -> Result<(), &'static str> { let caller = maybe_who.unwrap_or_else(|| whitelisted_caller()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); for i in 0..n { @@ -49,7 +49,7 @@ fn add_proxies(n: u32, maybe_who: Option) -> Result<(), Ok(()) } -fn add_announcements( +fn add_announcements( n: u32, maybe_who: Option, maybe_real: Option @@ -91,7 +91,7 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); - let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); }: _(RawOrigin::Signed(caller), real, Some(T::ProxyType::default()), Box::new(call)) verify { assert_last_event::(RawEvent::ProxyExecuted(Ok(())).into()) @@ -106,7 +106,7 @@ benchmarks! { T::Currency::make_free_balance_be(&delegate, BalanceOf::::max_value()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); - let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); Proxy::::announce( RawOrigin::Signed(delegate.clone()).into(), real.clone(), @@ -126,7 +126,7 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); - let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), real.clone(), @@ -147,7 +147,7 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); - let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), real.clone(), @@ -169,7 +169,7 @@ benchmarks! { // ... and "real" is the traditional caller. This is not a typo. 
let real: T::AccountId = whitelisted_caller(); add_announcements::(a, Some(caller.clone()), None)?; - let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call: ::Call = frame_system::Call::::remark(vec![]).into(); let call_hash = T::CallHasher::hash_of(&call); }: _(RawOrigin::Signed(caller.clone()), real.clone(), call_hash) verify { diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 75ab3902dc8dbc928ef50b229823e4de77ffe360..6342f0c052b82b758e67a4b15064f718e0f890c8 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -23,7 +23,7 @@ //! wish to execute some duration prior to execution happens. In this case, the target account may //! reject the announcement and in doing so, veto the execution. //! -//! - [`proxy::Trait`](./trait.Trait.html) +//! - [`proxy::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -33,7 +33,7 @@ //! ### Dispatchable Functions //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! [`Config`]: ./trait.Config.html // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -55,17 +55,17 @@ use frame_system::{self as system, ensure_signed}; use frame_support::dispatch::DispatchError; pub use weights::WeightInfo; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// Configuration trait. -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The overarching call type. type Call: Parameter + Dispatchable + GetDispatchInfo + From> + IsSubType> - + IsType<::Call>; + + IsType<::Call>; /// The currency mechanism. type Currency: ReservableCurrency; @@ -74,7 +74,7 @@ pub trait Trait: frame_system::Trait { /// The instance filter determines whether a given call may be proxied under this type. /// /// IMPORTANT: `Default` must be provided and MUST BE the the *most permissive* value. - type ProxyType: Parameter + Member + Ord + PartialOrd + InstanceFilter<::Call> + type ProxyType: Parameter + Member + Ord + PartialOrd + InstanceFilter<::Call> + Default; /// The base amount of currency needed to reserve for creating a proxy. @@ -137,10 +137,10 @@ pub struct Announcement { height: BlockNumber, } -type CallHashOf = <::CallHasher as Hash>::Output; +type CallHashOf = <::CallHasher as Hash>::Output; decl_storage! { - trait Store for Module as Proxy { + trait Store for Module as Proxy { /// The set of account proxies. Maps the account which has delegated to the accounts /// which are being delegated to, together with the amount held on deposit. pub Proxies get(fn proxies): map hasher(twox_64_concat) T::AccountId @@ -153,7 +153,7 @@ decl_storage! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// There are too many proxies registered or too many announcements pending. TooMany, /// Proxy registration not found. @@ -174,8 +174,8 @@ decl_error! { decl_event! { /// Events type. pub enum Event where - AccountId = ::AccountId, - ProxyType = ::ProxyType, + AccountId = ::AccountId, + ProxyType = ::ProxyType, Hash = CallHashOf, { /// A proxy was executed correctly, with the given \[result\]. @@ -189,7 +189,7 @@ decl_event! { } decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; /// Deposit one of this module's events by using the default implementation. @@ -239,7 +239,7 @@ decl_module! { fn proxy(origin, real: T::AccountId, force_proxy_type: Option, - call: Box<::Call>, + call: Box<::Call>, ) { let who = ensure_signed(origin)?; let def = Self::find_proxy(&real, &who, force_proxy_type)?; @@ -509,7 +509,7 @@ decl_module! { delegate: T::AccountId, real: T::AccountId, force_proxy_type: Option, - call: Box<::Call>, + call: Box<::Call>, ) { ensure_signed(origin)?; let def = Self::find_proxy(&real, &delegate, force_proxy_type)?; @@ -525,7 +525,7 @@ decl_module! { } } -impl Module { +impl Module { /// Calculate the address of an anonymous account. /// @@ -680,12 +680,12 @@ impl Module { fn do_proxy( def: ProxyDefinition, real: T::AccountId, - call: ::Call, + call: ::Call, ) { // This is a freshly authenticated new account, the origin restrictions doesn't apply. let mut origin: T::Origin = frame_system::RawOrigin::Signed(real).into(); - origin.add_filter(move |c: &::Call| { - let c = ::Call::from_ref(c); + origin.add_filter(move |c: &::Call| { + let c = ::Call::from_ref(c); // We make sure the proxy call does access this pallet to change modify proxies. match c.is_sub_type() { // Proxy call cannot add or remove a proxy with more permissions than it already has. @@ -706,7 +706,7 @@ impl Module { pub mod migration { use super::*; - /// Migration code for https://github.com/paritytech/substrate/pull/6770 + /// Migration code for /// /// Details: This migration was introduced between Substrate 2.0-RC6 and Substrate 2.0 releases. /// Before this migration, the `Proxies` storage item used a tuple of `AccountId` and @@ -714,7 +714,7 @@ pub mod migration { /// `ProxyDefinition` which additionally included a `BlockNumber` delay value. This function, /// simply takes any existing proxies using the old tuple format, and migrates it to the new /// struct by setting the delay to zero. - pub fn migrate_to_time_delayed_proxies() -> Weight { + pub fn migrate_to_time_delayed_proxies() -> Weight { Proxies::::translate::<(Vec<(T::AccountId, T::ProxyType)>, BalanceOf), _>( |_, (targets, deposit)| Some(( targets.into_iter() @@ -727,6 +727,6 @@ pub mod migration { deposit, )) ); - T::MaximumBlockWeight::get() + T::BlockWeights::get().max_block } } diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index bcf3b678ed64438df8735234a77ef6ed61925322..08211052356234fcb33ebd01ba76f192ad2b0429 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -23,11 +23,11 @@ use super::*; use frame_support::{ assert_ok, assert_noop, impl_outer_origin, parameter_types, impl_outer_dispatch, - weights::Weight, impl_outer_event, RuntimeDebug, dispatch::DispatchError, traits::Filter, + impl_outer_event, RuntimeDebug, dispatch::DispatchError, traits::Filter, }; use codec::{Encode, Decode}; use sp_core::H256; -use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; +use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; use crate as proxy; impl_outer_origin! { @@ -57,12 +57,14 @@ impl_outer_dispatch! { pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -74,13 +76,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = TestEvent; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -91,7 +86,7 @@ impl frame_system::Trait for Test { parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type Event = TestEvent; @@ -100,7 +95,7 @@ impl pallet_balances::Trait for Test { type AccountStore = System; type WeightInfo = (); } -impl pallet_utility::Trait for Test { +impl pallet_utility::Config for Test { type Event = TestEvent; type Call = Call; type WeightInfo = (); @@ -143,7 +138,7 @@ impl Filter for BaseFilter { } } } -impl Trait for Test { +impl Config for Test { type Event = TestEvent; type Call = Call; type Currency = Balances; diff --git a/frame/proxy/src/weights.rs b/frame/proxy/src/weights.rs index 944fe53a149c9d38bb6490ca618e3bfa69200fdb..8f5a608aa5854b5cc2969c277bda2ab6fedb5bbd 100644 --- a/frame/proxy/src/weights.rs +++ b/frame/proxy/src/weights.rs @@ -58,7 +58,7 @@ pub trait WeightInfo { /// Weights for pallet_proxy using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn proxy(p: u32, ) -> Weight { (32_194_000 as Weight) .saturating_add((215_000 as Weight).saturating_mul(p as Weight)) diff --git a/frame/randomness-collective-flip/README.md b/frame/randomness-collective-flip/README.md index 2af18d3d2f7b589c55e738b95b358542661e3c07..9885c734d9fad4cc0fda8a94b0178d34ebb93cb3 100644 --- a/frame/randomness-collective-flip/README.md +++ b/frame/randomness-collective-flip/README.md @@ -22,10 +22,10 @@ the system trait. ```rust use frame_support::{decl_module, dispatch, traits::Randomness}; -pub trait Trait: frame_system::Trait {} +pub trait Config: frame_system::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn random_module_example(origin) -> dispatch::DispatchResult { let _random_value = >::random(&b"my context"[..]); diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index c1747669dab07f21e3fedafbfe9372734fe8dc02..7e0e64f3cc08439b93d72a9330642e0d27b8e39a 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -39,10 +39,10 @@ //! ``` //! use frame_support::{decl_module, dispatch, traits::Randomness}; //! -//! pub trait Trait: frame_system::Trait {} +//! 
pub trait Config: frame_system::Config {} //! //! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 0] //! pub fn random_module_example(origin) -> dispatch::DispatchResult { //! let _random_value = >::random(&b"my context"[..]); @@ -63,18 +63,18 @@ use frame_support::{ }; use safe_mix::TripletMix; use codec::Encode; -use frame_system::Trait; +use frame_system::Config; const RANDOM_MATERIAL_LEN: u32 = 81; -fn block_number_to_index(block_number: T::BlockNumber) -> usize { +fn block_number_to_index(block_number: T::BlockNumber) -> usize { // on_initialize is called on the first block after genesis let index = (block_number - 1u32.into()) % RANDOM_MATERIAL_LEN.into(); index.try_into().ok().expect("Something % 81 is always smaller than usize; qed") } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { fn on_initialize(block_number: T::BlockNumber) -> Weight { let parent_hash = >::parent_hash(); @@ -91,7 +91,7 @@ decl_module! { } decl_storage! { - trait Store for Module as RandomnessCollectiveFlip { + trait Store for Module as RandomnessCollectiveFlip { /// Series of block headers from the last 81 blocks that acts as random seed material. This /// is arranged as a ring buffer with `block_number % 81` being the index into the `Vec` of /// the oldest hash. @@ -99,7 +99,7 @@ decl_storage! { } } -impl Randomness for Module { +impl Randomness for Module { /// This randomness uses a low-influence function, drawing upon the block hashes from the /// previous 81 blocks. Its result for any given subject will be known far in advance by anyone /// observing the chain. Any block producer has significant influence over their block hashes @@ -135,12 +135,12 @@ mod tests { use super::*; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, Header as _, IdentityLookup}, }; + use frame_system::limits; use frame_support::{ - impl_outer_origin, parameter_types, weights::Weight, traits::{Randomness, OnInitialize}, + impl_outer_origin, parameter_types, traits::{Randomness, OnInitialize}, }; #[derive(Clone, PartialEq, Eq)] @@ -152,13 +152,17 @@ mod tests { parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: limits::BlockWeights = limits::BlockWeights + ::simple_max(1024); + pub BlockLength: limits::BlockLength = limits::BlockLength + ::max(2 * 1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = BlockLength; + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -170,13 +174,6 @@ mod tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); type PalletInfo = (); type AccountData = (); diff --git a/frame/recovery/README.md b/frame/recovery/README.md index b6d3ae5aceeb38471559dc7657febe6b40b5e720..c45df2c666af6afa6083921730f256217641669f 100644 --- a/frame/recovery/README.md +++ b/frame/recovery/README.md @@ -131,4 +131,4 @@ of this pallet are: * `set_recovered` - The ROOT origin is able to skip the recovery process and directly allow one account to access another. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index c97824497fded511935a9e4384e8632b4fd262f1..023a805a719bf5c0f6e56ae6ff705ee9218214e9 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -17,7 +17,7 @@ //! # Recovery Pallet //! -//! - [`recovery::Trait`](./trait.Trait.html) +//! - [`recovery::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -172,12 +172,12 @@ mod mock; mod tests; type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Currency<::AccountId>>::Balance; /// Configuration trait. -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The overarching call type. type Call: Parameter + Dispatchable + GetDispatchInfo; @@ -237,7 +237,7 @@ pub struct RecoveryConfig { } decl_storage! { - trait Store for Module as Recovery { + trait Store for Module as Recovery { /// The set of recoverable accounts and their recovery configuration. pub Recoverable get(fn recovery_config): map hasher(twox_64_concat) T::AccountId @@ -262,7 +262,7 @@ decl_storage! { decl_event! { /// Events type. pub enum Event where - AccountId = ::AccountId, + AccountId = ::AccountId, { /// A recovery process has been set up for an \[account\]. RecoveryCreated(AccountId), @@ -284,7 +284,7 @@ decl_event! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// User is not allowed to make a call on behalf of this account NotAllowed, /// Threshold must be greater than zero @@ -321,7 +321,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; /// The base amount of currency needed to reserve for creating a recovery configuration. @@ -361,7 +361,7 @@ decl_module! 
{ )] fn as_recovered(origin, account: T::AccountId, - call: Box<::Call> + call: Box<::Call> ) -> DispatchResult { let who = ensure_signed(origin)?; // Check `who` is allowed to make a call on behalf of `account` @@ -677,7 +677,7 @@ decl_module! { } } -impl Module { +impl Module { /// Check that friends list is sorted and has no duplicates. fn is_sorted_and_unique(friends: &Vec) -> bool { friends.windows(2).all(|w| w[0] < w[1]) diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index 35373562487f72dd1496521b8459fbcb60d7a332..9b991987ceeba8680217ceff25b2d94cfdfd6ab4 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -21,12 +21,11 @@ use super::*; use frame_support::{ impl_outer_origin, impl_outer_dispatch, impl_outer_event, parameter_types, - weights::Weight, traits::{OnInitialize, OnFinalize}, }; use sp_core::H256; use sp_runtime::{ - Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header, + traits::{BlakeTwo256, IdentityLookup}, testing::Header, }; use crate as recovery; @@ -53,13 +52,15 @@ pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = Call; type Index = u64; @@ -71,13 +72,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = TestEvent; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -90,7 +84,7 @@ parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u128; type DustRemoval = (); @@ -107,7 +101,7 @@ parameter_types! { pub const RecoveryDeposit: u64 = 10; } -impl Trait for Test { +impl Config for Test { type Event = TestEvent; type Call = Call; type Currency = Balances; diff --git a/frame/scheduler/README.md b/frame/scheduler/README.md index 47beb71e3a0d1eeeec66d4df9a54aec775a94120..3d07818b15d5e5ecaa3339db27a66ac488d1f02d 100644 --- a/frame/scheduler/README.md +++ b/frame/scheduler/README.md @@ -12,7 +12,7 @@ specified block number or at a specified period. These scheduled dispatches may be named or anonymous and may be canceled. **NOTE:** The scheduled calls will be dispatched with the default filter -for the origin: namely `frame_system::Trait::BaseCallFilter` for all origin +for the origin: namely `frame_system::Config::BaseCallFilter` for all origin except root which will get no filter. And not the filter contained in origin use to call `fn schedule`. @@ -31,4 +31,4 @@ then those filter will not be used when dispatching the schedule call. `Vec` parameter that can be used for identification. * `cancel_named` - the named complement to the cancel function. 
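The named/anonymous distinction in the list above comes down to the two storage items that follow in `lib.rs`: tasks live in a per-block `Agenda`, and named tasks additionally get a `Lookup` entry from their id to `(block, index)` so `cancel_named` can find them. Below is a dependency-free sketch of that shape; `Task`, `Scheduler` and the `&'static str` "call" are simplified stand-ins, not the pallet's `Scheduled` struct.

```rust
// Standalone sketch of the Agenda/Lookup bookkeeping used by the scheduler.
use std::collections::HashMap;

/// Placeholder for a scheduled item; the real agenda stores call, priority,
/// optional periodicity and the dispatch origin.
struct Task {
    call: &'static str,
}

#[derive(Default)]
struct Scheduler {
    /// Block number -> task slots, in the spirit of the `Agenda` map.
    agenda: HashMap<u32, Vec<Option<Task>>>,
    /// Name -> (block, index), in the spirit of the `Lookup` map.
    lookup: HashMap<Vec<u8>, (u32, usize)>,
}

impl Scheduler {
    fn schedule_named(&mut self, name: Vec<u8>, when: u32, call: &'static str) {
        let slots = self.agenda.entry(when).or_default();
        let index = slots.len();
        slots.push(Some(Task { call }));
        self.lookup.insert(name, (when, index));
    }

    fn cancel_named(&mut self, name: &[u8]) -> bool {
        if let Some((when, index)) = self.lookup.remove(name) {
            if let Some(slot) = self.agenda.get_mut(&when).and_then(|v| v.get_mut(index)) {
                *slot = None; // leave a hole so later indices in the same block stay valid
                return true;
            }
        }
        false
    }
}

fn main() {
    let mut s = Scheduler::default();
    s.schedule_named(b"remark".to_vec(), 4, "frame_system::remark");

    if let Some(Some(task)) = s.agenda.get(&4).and_then(|slots| slots.first()) {
        println!("block 4 will dispatch {}", task.call);
    }

    assert!(s.cancel_named(b"remark"));
    assert!(!s.cancel_named(b"remark")); // already cancelled
}
```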
-License: Unlicense \ No newline at end of file +License: Unlicense diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index 753e9244628ad5cc3e8e3c77daf951212becb22a..6a67efc9d2dca3279c70a02ddd56a22b524452ab 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -31,7 +31,7 @@ use frame_system::Module as System; const BLOCK_NUMBER: u32 = 2; // Add `n` named items to the schedule -fn fill_schedule (when: T::BlockNumber, n: u32) -> Result<(), &'static str> { +fn fill_schedule (when: T::BlockNumber, n: u32) -> Result<(), &'static str> { // Essentially a no-op call. let call = frame_system::Call::set_storage(vec![]); for i in 0..n { diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index c467678a466decdd708d5171d388c7e5b0734e05..9f0f806233d82bfbdc99ad043147ce7d419e48bc 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -18,7 +18,7 @@ //! # Scheduler //! A module for scheduling dispatches. //! -//! - [`scheduler::Trait`](./trait.Trait.html) +//! - [`scheduler::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! @@ -29,7 +29,7 @@ //! may be named or anonymous and may be canceled. //! //! **NOTE:** The scheduled calls will be dispatched with the default filter -//! for the origin: namely `frame_system::Trait::BaseCallFilter` for all origin +//! for the origin: namely `frame_system::Config::BaseCallFilter` for all origin //! except root which will get no filter. And not the filter contained in origin //! use to call `fn schedule`. //! @@ -70,27 +70,27 @@ pub use weights::WeightInfo; /// pallet is dependent on specific other pallets, then their configuration traits /// should be added to our implied traits list. /// -/// `system::Trait` should always be included in our implied traits. -pub trait Trait: system::Trait { +/// `system::Config` should always be included in our implied traits. +pub trait Config: system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The aggregated origin which the dispatch will take. type Origin: OriginTrait + From + IsType<::Origin>; + Self::PalletsOrigin> + From + IsType<::Origin>; /// The caller origin, overarching type of all pallets origins. type PalletsOrigin: From> + Codec + Clone + Eq; /// The aggregated call type. - type Call: Parameter + Dispatchable::Origin> + GetDispatchInfo + From>; + type Call: Parameter + Dispatchable::Origin> + GetDispatchInfo + From>; /// The maximum weight that may be scheduled per block for any dispatchables of less priority /// than `schedule::HARD_DEADLINE`. type MaximumWeight: Get; /// Required origin to schedule or cancel calls. - type ScheduleOrigin: EnsureOrigin<::Origin>; + type ScheduleOrigin: EnsureOrigin<::Origin>; /// The maximum number of scheduled calls in the queue for a single block. /// Not strictly enforced, but used for weight estimation. @@ -150,10 +150,10 @@ impl Default for Releases { } decl_storage! { - trait Store for Module as Scheduler { + trait Store for Module as Scheduler { /// Items to be executed, indexed by the block number that they should be executed on. pub Agenda: map hasher(twox_64_concat) T::BlockNumber - => Vec::Call, T::BlockNumber, T::PalletsOrigin, T::AccountId>>>; + => Vec::Call, T::BlockNumber, T::PalletsOrigin, T::AccountId>>>; /// Lookup from identity to the block number and index of the task. 
Lookup: map hasher(twox_64_concat) Vec => Option>; @@ -166,7 +166,7 @@ decl_storage! { } decl_event!( - pub enum Event where ::BlockNumber { + pub enum Event where ::BlockNumber { /// Scheduled some task. \[when, index\] Scheduled(BlockNumber, u32), /// Canceled some task. \[when, index\] @@ -177,7 +177,7 @@ decl_event!( ); decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Failed to schedule a call FailedToSchedule, /// Cannot find the scheduled call. @@ -191,7 +191,7 @@ decl_error! { decl_module! { /// Scheduler module declaration. - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { type Error = Error; fn deposit_event() = default; @@ -210,10 +210,10 @@ decl_module! { when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::Call>, + call: Box<::Call>, ) { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_schedule(DispatchTime::At(when), maybe_periodic, priority, origin.caller().clone(), *call)?; } @@ -230,7 +230,7 @@ decl_module! { #[weight = T::WeightInfo::cancel(T::MaxScheduledPerBlock::get())] fn cancel(origin, when: T::BlockNumber, index: u32) { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_cancel(Some(origin.caller().clone()), (when, index))?; } @@ -250,10 +250,10 @@ decl_module! { when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::Call>, + call: Box<::Call>, ) { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_schedule_named( id, DispatchTime::At(when), maybe_periodic, priority, origin.caller().clone(), *call )?; @@ -272,7 +272,7 @@ decl_module! { #[weight = T::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get())] fn cancel_named(origin, id: Vec) { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_cancel_named(Some(origin.caller().clone()), id)?; } @@ -286,10 +286,10 @@ decl_module! { after: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::Call>, + call: Box<::Call>, ) { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_schedule( DispatchTime::After(after), maybe_periodic, priority, origin.caller().clone(), *call )?; @@ -306,10 +306,10 @@ decl_module! { after: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::Call>, + call: Box<::Call>, ) { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_schedule_named( id, DispatchTime::After(after), maybe_periodic, priority, origin.caller().clone(), *call )?; @@ -347,7 +347,7 @@ decl_module! { *cumulative_weight = cumulative_weight .saturating_add(s.call.get_dispatch_info().weight); - let origin = <::Origin as From>::from( + let origin = <::Origin as From>::from( s.origin.clone() ).into(); @@ -415,7 +415,7 @@ decl_module! { } } -impl Module { +impl Module { /// Migrate storage format from V1 to V2. /// Return true if migration is performed. 
pub fn migrate_v1_to_t2() -> bool { @@ -423,7 +423,7 @@ impl Module { StorageVersion::put(Releases::V2); Agenda::::translate::< - Vec::Call, T::BlockNumber>>>, _ + Vec::Call, T::BlockNumber>>>, _ >(|_, agenda| Some( agenda .into_iter() @@ -447,7 +447,7 @@ impl Module { /// Helper to migrate scheduler when the pallet origin type has changed. pub fn migrate_origin + codec::Decode>() { Agenda::::translate::< - Vec::Call, T::BlockNumber, OldOrigin, T::AccountId>>>, _ + Vec::Call, T::BlockNumber, OldOrigin, T::AccountId>>>, _ >(|_, agenda| Some( agenda .into_iter() @@ -485,7 +485,7 @@ impl Module { maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: ::Call + call: ::Call ) -> Result, DispatchError> { let when = Self::resolve_time(when)?; @@ -569,7 +569,7 @@ impl Module { maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: ::Call, + call: ::Call, ) -> Result, DispatchError> { // ensure id it is unique if Lookup::::contains_key(&id) { @@ -657,7 +657,7 @@ impl Module { } } -impl schedule::Anon::Call, T::PalletsOrigin> for Module { +impl schedule::Anon::Call, T::PalletsOrigin> for Module { type Address = TaskAddress; fn schedule( @@ -665,7 +665,7 @@ impl schedule::Anon::Call, T::PalletsOrig maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: ::Call + call: ::Call ) -> Result { Self::do_schedule(when, maybe_periodic, priority, origin, call) } @@ -686,7 +686,7 @@ impl schedule::Anon::Call, T::PalletsOrig } } -impl schedule::Named::Call, T::PalletsOrigin> for Module { +impl schedule::Named::Call, T::PalletsOrigin> for Module { type Address = TaskAddress; fn schedule_named( @@ -695,7 +695,7 @@ impl schedule::Named::Call, T::PalletsOri maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: ::Call, + call: ::Call, ) -> Result { Self::do_schedule_named(id, when, maybe_periodic, priority, origin, call).map_err(|_| ()) } @@ -746,8 +746,8 @@ mod tests { pub fn log() -> Vec<(OriginCaller, u32)> { LOG.with(|log| log.borrow().clone()) } - pub trait Trait: system::Trait { - type Event: From + Into<::Event>; + pub trait Config: system::Config { + type Event: From + Into<::Event>; } decl_event! { pub enum Event { @@ -755,10 +755,10 @@ mod tests { } } decl_module! { - pub struct Module for enum Call + pub struct Module for enum Call where - origin: ::Origin, - ::Origin: OriginTrait + origin: ::Origin, + ::Origin: OriginTrait { fn deposit_event() = default; @@ -812,12 +812,14 @@ mod tests { pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 2_000_000_000_000; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(2_000_000_000_000); } - impl system::Trait for Test { + impl system::Config for Test { type BaseCallFilter = BaseFilter; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = RocksDbWeight; type Origin = Origin; type Call = Call; type Index = u64; @@ -829,13 +831,6 @@ mod tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = RocksDbWeight; - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = (); @@ -843,18 +838,18 @@ mod tests { type OnKilledAccount = (); type SystemWeightInfo = (); } - impl logger::Trait for Test { + impl logger::Config for Test { type Event = (); } parameter_types! { - pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * MaximumBlockWeight::get(); + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; pub const MaxScheduledPerBlock: u32 = 10; } ord_parameter_types! { pub const One: u64 = 1; } - impl Trait for Test { + impl Config for Test { type Event = (); type Origin = Origin; type PalletsOrigin = OriginCaller; @@ -889,7 +884,7 @@ mod tests { fn basic_scheduling_works() { new_test_ext().execute_with(|| { let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); + assert!(!::BaseCallFilter::filter(&call)); assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call)); run_to_block(3); assert!(logger::log().is_empty()); @@ -905,7 +900,7 @@ mod tests { new_test_ext().execute_with(|| { run_to_block(2); let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); + assert!(!::BaseCallFilter::filter(&call)); // This will schedule the call 3 blocks after the next block... so block 3 + 3 = 6 assert_ok!(Scheduler::do_schedule(DispatchTime::After(3), None, 127, root(), call)); run_to_block(5); @@ -922,7 +917,7 @@ mod tests { new_test_ext().execute_with(|| { run_to_block(2); let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); + assert!(!::BaseCallFilter::filter(&call)); assert_ok!(Scheduler::do_schedule(DispatchTime::After(0), None, 127, root(), call)); // Will trigger on the next block. 
run_to_block(3); @@ -960,7 +955,7 @@ mod tests { fn reschedule_works() { new_test_ext().execute_with(|| { let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); + assert!(!::BaseCallFilter::filter(&call)); assert_eq!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call).unwrap(), (4, 0)); run_to_block(3); @@ -985,7 +980,7 @@ mod tests { fn reschedule_named_works() { new_test_ext().execute_with(|| { let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); + assert!(!::BaseCallFilter::filter(&call)); assert_eq!(Scheduler::do_schedule_named( 1u32.encode(), DispatchTime::At(4), None, 127, root(), call ).unwrap(), (4, 0)); @@ -1012,7 +1007,7 @@ mod tests { fn reschedule_named_perodic_works() { new_test_ext().execute_with(|| { let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); + assert!(!::BaseCallFilter::filter(&call)); assert_eq!(Scheduler::do_schedule_named( 1u32.encode(), DispatchTime::At(4), Some((3, 3)), 127, root(), call ).unwrap(), (4, 0)); @@ -1203,10 +1198,10 @@ mod tests { #[test] fn on_initialize_weight_is_correct() { new_test_ext().execute_with(|| { - let base_weight: Weight = ::DbWeight::get().reads_writes(1, 2); + let base_weight: Weight = ::DbWeight::get().reads_writes(1, 2); let base_multiplier = 0; - let named_multiplier = ::DbWeight::get().writes(1); - let periodic_multiplier = ::DbWeight::get().reads_writes(1, 1); + let named_multiplier = ::DbWeight::get().writes(1); + let periodic_multiplier = ::DbWeight::get().reads_writes(1, 1); // Named assert_ok!( diff --git a/frame/scheduler/src/weights.rs b/frame/scheduler/src/weights.rs index 3699e6f85b234f438011be0fb3c714958c9fe0b8..3c8be54c9ae54ce35069b820f6ae2b4b23555e0f 100644 --- a/frame/scheduler/src/weights.rs +++ b/frame/scheduler/src/weights.rs @@ -52,7 +52,7 @@ pub trait WeightInfo { /// Weights for pallet_scheduler using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn schedule(s: u32, ) -> Weight { (35_029_000 as Weight) .saturating_add((77_000 as Weight).saturating_mul(s as Weight)) diff --git a/frame/scored-pool/README.md b/frame/scored-pool/README.md index 948d5b497721b7f69c8ff2308d9c2d66cd1fbc60..8f7198a5e11de8cf0736c79de6d7a2b9f46002e8 100644 --- a/frame/scored-pool/README.md +++ b/frame/scored-pool/README.md @@ -41,10 +41,10 @@ use frame_support::{decl_module, dispatch}; use frame_system::ensure_signed; use pallet_scored_pool::{self as scored_pool}; -pub trait Trait: scored_pool::Trait {} +pub trait Config: scored_pool::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn candidate(origin) -> dispatch::DispatchResult { let who = ensure_signed(origin)?; @@ -63,4 +63,4 @@ decl_module! { This module depends on the [System module](https://docs.rs/frame-system/latest/frame_system/). -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index 90d4aca4e42a42e8030a468fb741ae69fd846be4..afcac229367b14e8e8ff840880d9f059d3e3c5e7 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -37,7 +37,7 @@ //! from the `Pool` and `Members`; the entity is immediately replaced //! by the next highest scoring candidate in the pool, if available. //! -//! 
- [`scored_pool::Trait`](./trait.Trait.html) +//! - [`scored_pool::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! @@ -58,10 +58,10 @@ //! use frame_system::ensure_signed; //! use pallet_scored_pool::{self as scored_pool}; //! -//! pub trait Trait: scored_pool::Trait {} +//! pub trait Config: scored_pool::Config {} //! //! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 0] //! pub fn candidate(origin) -> dispatch::DispatchResult { //! let who = ensure_signed(origin)?; @@ -103,8 +103,8 @@ use frame_support::{ use frame_system::{ensure_root, ensure_signed}; use sp_runtime::traits::{AtLeast32Bit, MaybeSerializeDeserialize, Zero, StaticLookup}; -type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; -type PoolT = Vec<(::AccountId, Option<>::Score>)>; +type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; +type PoolT = Vec<(::AccountId, Option<>::Score>)>; /// The enum is supplied when refreshing the members set. /// Depending on the enum variant the corresponding associated @@ -116,7 +116,7 @@ enum ChangeReceiver { MembershipChanged, } -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The currency used for deposits. type Currency: Currency + ReservableCurrency; @@ -125,7 +125,7 @@ pub trait Trait: frame_system::Trait { AtLeast32Bit + Clone + Copy + Default + FullCodec + MaybeSerializeDeserialize + Debug; /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; // The deposit which is reserved from candidates if they want to // start a candidacy. The deposit gets returned when the candidacy is @@ -156,7 +156,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as ScoredPool { + trait Store for Module, I: Instance=DefaultInstance> as ScoredPool { /// The current pool of candidates, stored as an ordered Vec /// (ordered descending by score, `None` last, highest first). Pool get(fn pool) config(): PoolT; @@ -204,7 +204,7 @@ decl_storage! { decl_event!( pub enum Event where - ::AccountId, + ::AccountId, { /// The given member was removed. See the transaction for who. MemberRemoved, @@ -225,7 +225,7 @@ decl_event!( decl_error! { /// Error for the scored-pool module. - pub enum Error for Module, I: Instance> { + pub enum Error for Module, I: Instance> { /// Already a member. AlreadyInPool, /// Index out of bounds. @@ -236,7 +236,7 @@ decl_error! { } decl_module! { - pub struct Module, I: Instance=DefaultInstance> + pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: T::Origin { @@ -275,7 +275,7 @@ decl_module! { // can be inserted as last element in pool, since entities with // `None` are always sorted to the end. - >::append((who.clone(), Option::<>::Score>::None)); + >::append((who.clone(), Option::<>::Score>::None)); >::insert(&who, true); @@ -382,7 +382,7 @@ decl_module! { } } -impl, I: Instance> Module { +impl, I: Instance> Module { /// Fetches the `MemberCount` highest scoring members from /// `Pool` and puts them into `Members`. 
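Note that the patch text above has lost its angle-bracketed generic parameters (e.g. `type BalanceOf = <>::Currency ...`). For reference, the renamed scored-pool aliases take roughly the following fully qualified form; this is a reconstruction assuming the pallet's usual `T`/`I` type and instance parameters, not literal patch content:

```rust
// After the rename, `Trait`/`Trait<I>` bounds become `Config`/`Config<I>`.
type BalanceOf<T, I> =
    <<T as Config<I>>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
type PoolT<T, I> =
    Vec<(<T as frame_system::Config>::AccountId, Option<<T as Config<I>>::Score>)>;
```

The same stripping affects the event bounds (`Into<<Self as frame_system::Config>::Event>`) and the boxed `<T as Config>::Call` parameters in the recovery, scheduler and session hunks; in every case only the trait name changes, not the shape of the type.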
diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index 59c0dc66cca609884dfb2b0f4d93e2bf764e3d33..7d49136cef4f2f822410c032c9cb9ce98d53d746 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -20,10 +20,10 @@ use super::*; use std::cell::RefCell; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight, ord_parameter_types}; +use frame_support::{impl_outer_origin, parameter_types, ord_parameter_types}; use sp_core::H256; use sp_runtime::{ - Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header, + traits::{BlakeTwo256, IdentityLookup}, testing::Header, }; use frame_system::EnsureSignedBy; @@ -36,21 +36,21 @@ pub struct Test; parameter_types! { pub const CandidateDeposit: u64 = 25; pub const Period: u64 = 4; - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const ExistentialDeposit: u64 = 1; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } ord_parameter_types! { pub const KickOrigin: u64 = 2; pub const ScoreOrigin: u64 = 3; } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -62,13 +62,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -77,7 +70,7 @@ impl frame_system::Trait for Test { type SystemWeightInfo = (); } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type Event = (); @@ -114,7 +107,7 @@ impl InitializeMembers for TestChangeMembers { } } -impl Trait for Test { +impl Config for Test { type Event = (); type KickOrigin = EnsureSignedBy; type MembershipInitialized = TestChangeMembers; diff --git a/frame/session/README.md b/frame/session/README.md index 60da8958f73d0177be221990ea93172e30e251bf..e1f8b7f8e0238da53061dd8141b031625bb8cb73 100644 --- a/frame/session/README.md +++ b/frame/session/README.md @@ -71,7 +71,7 @@ The [Staking pallet](https://docs.rs/pallet-staking/latest/pallet_staking/) uses ```rust use pallet_session as session; -fn validators() -> Vec<::ValidatorId> { +fn validators() -> Vec<::ValidatorId> { >::validators() } ``` @@ -80,4 +80,4 @@ fn validators() -> Vec<::V - [Staking](https://docs.rs/pallet-staking/latest/pallet_staking/) -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs index 277200b269569c651e2f2b3e2910860cedf0b7a7..bd85b97c0d33e54762f7b03be74e73152c0df5de 100644 --- a/frame/session/benchmarking/src/lib.rs +++ b/frame/session/benchmarking/src/lib.rs @@ -41,10 +41,10 @@ use sp_runtime::traits::{One, StaticLookup}; const MAX_VALIDATORS: u32 = 1000; -pub struct Module(pallet_session::Module); -pub trait Trait: pallet_session::Trait + 
pallet_session::historical::Trait + pallet_staking::Trait {} +pub struct Module(pallet_session::Module); +pub trait Config: pallet_session::Config + pallet_session::historical::Config + pallet_staking::Config {} -impl OnInitialize for Module { +impl OnInitialize for Module { fn on_initialize(n: T::BlockNumber) -> frame_support::weights::Weight { pallet_session::Module::::on_initialize(n) } @@ -121,7 +121,7 @@ benchmarks! { /// Sets up the benchmark for checking a membership proof. It creates the given /// number of validators, sets random session keys and then creates a membership /// proof for the first authority and returns its key and the proof. -fn check_membership_proof_setup( +fn check_membership_proof_setup( n: u32, ) -> ( (sp_runtime::KeyTypeId, &'static [u8; 32]), diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index 6a9cfc5f98a1b7290828e5f2e5605e4a7aa41630..9001dee8790185d927b6e828a014f40d6ecd16f0 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -45,8 +45,11 @@ impl_outer_dispatch! { #[derive(Clone, Eq, PartialEq, Debug)] pub struct Test; -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; @@ -58,13 +61,6 @@ impl frame_system::Trait for Test { type Header = sp_runtime::testing::Header; type Event = (); type BlockHashCount = (); - type MaximumBlockWeight = (); - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = (); - type AvailableBlockRatio = (); - type MaximumBlockLength = (); type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -75,7 +71,7 @@ impl frame_system::Trait for Test { parameter_types! { pub const ExistentialDeposit: Balance = 10; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = Balance; type Event = (); @@ -88,13 +84,13 @@ impl pallet_balances::Trait for Test { parameter_types! 
{ pub const MinimumPeriod: u64 = 5; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -120,7 +116,7 @@ impl pallet_session::SessionHandler for TestSessionHandler { fn on_disabled(_: usize) {} } -impl pallet_session::Trait for Test { +impl pallet_session::Config for Test { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type Keys = SessionKeys; type ShouldEndSession = pallet_session::PeriodicSessions<(), ()>; @@ -157,7 +153,7 @@ impl frame_system::offchain::SendTransactionTypes for Test where type Extrinsic = Extrinsic; } -impl pallet_staking::Trait for Test { +impl pallet_staking::Config for Test { type Currency = Balances; type UnixTime = pallet_timestamp::Module; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; @@ -182,7 +178,7 @@ impl pallet_staking::Trait for Test { type WeightInfo = (); } -impl crate::Trait for Test {} +impl crate::Config for Test {} pub fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index 20c3d57464c89c9a3f6b4877e64210e489a79513..53f4dd7639b8cd86d16ba980d52b6f57c8a5aa16 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -41,8 +41,8 @@ mod shared; pub mod offchain; pub mod onchain; -/// Trait necessary for the historical module. -pub trait Trait: super::Trait { +/// Config necessary for the historical module. +pub trait Config: super::Config { /// Full identification of the validator. type FullIdentification: Parameter; @@ -57,7 +57,7 @@ pub trait Trait: super::Trait { } decl_storage! { - trait Store for Module as Session { + trait Store for Module as Session { /// Mapping from historical session indices to session-data root hash and validator count. HistoricalSessions get(fn historical_root): map hasher(twox_64_concat) SessionIndex => Option<(T::Hash, ValidatorCount)>; @@ -71,10 +71,10 @@ decl_storage! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin {} } -impl Module { +impl Module { /// Prune historical stored session roots up to (but not including) /// `up_to`. pub fn prune_up_to(up_to: SessionIndex) { @@ -116,7 +116,7 @@ pub trait SessionManager: crate::SessionManager /// sets the historical trie root of the ending session. pub struct NoteHistoricalRoot(sp_std::marker::PhantomData<(T, I)>); -impl crate::SessionManager for NoteHistoricalRoot +impl crate::SessionManager for NoteHistoricalRoot where I: SessionManager { fn new_session(new_index: SessionIndex) -> Option> { @@ -160,15 +160,15 @@ impl crate::SessionManager for NoteHistoricalRoot = (::ValidatorId, ::FullIdentification); +pub type IdentificationTuple = (::ValidatorId, ::FullIdentification); /// A trie instance for checking and generating proofs. 
-pub struct ProvingTrie { +pub struct ProvingTrie { db: MemoryDB, root: T::Hash, } -impl ProvingTrie { +impl ProvingTrie { fn generate_for(validators: I) -> Result where I: IntoIterator { @@ -260,7 +260,7 @@ impl ProvingTrie { } } -impl> frame_support::traits::KeyOwnerProofSystem<(KeyTypeId, D)> +impl> frame_support::traits::KeyOwnerProofSystem<(KeyTypeId, D)> for Module { type Proof = MembershipProof; diff --git a/frame/session/src/historical/offchain.rs b/frame/session/src/historical/offchain.rs index 97655d1a18b3280b85e7d2074e8071152611dfea..9bb20ababb3ae0b54bb2de3506071d45a928a26b 100644 --- a/frame/session/src/historical/offchain.rs +++ b/frame/session/src/historical/offchain.rs @@ -29,18 +29,18 @@ use sp_runtime::{offchain::storage::StorageValueRef, KeyTypeId}; use sp_session::MembershipProof; use super::super::{Module as SessionModule, SessionIndex}; -use super::{IdentificationTuple, ProvingTrie, Trait}; +use super::{IdentificationTuple, ProvingTrie, Config}; use super::shared; use sp_std::prelude::*; /// A set of validators, which was used for a fixed session index. -struct ValidatorSet { +struct ValidatorSet { validator_set: Vec>, } -impl ValidatorSet { +impl ValidatorSet { /// Load the set of validators for a particular session index from the off-chain storage. /// /// If none is found or decodable given `prefix` and `session`, it will return `None`. @@ -61,7 +61,7 @@ impl ValidatorSet { /// Implement conversion into iterator for usage /// with [ProvingTrie](super::ProvingTrie::generate_for). -impl sp_std::iter::IntoIterator for ValidatorSet { +impl sp_std::iter::IntoIterator for ValidatorSet { type Item = (T::ValidatorId, T::FullIdentification); type IntoIter = sp_std::vec::IntoIter; fn into_iter(self) -> Self::IntoIter { @@ -74,7 +74,7 @@ impl sp_std::iter::IntoIterator for ValidatorSet { /// Based on the yielded `MembershipProof` the implementer may decide what /// to do, i.e. in case of a failed proof, enqueue a transaction back on /// chain reflecting that, with all its consequences such as i.e. slashing. -pub fn prove_session_membership>( +pub fn prove_session_membership>( session_index: SessionIndex, session_key: (KeyTypeId, D), ) -> Option { @@ -97,7 +97,7 @@ pub fn prove_session_membership>( /// Due to re-organisation it could be that the `first_to_keep` might be less /// than the stored one, in which case the conservative choice is made to keep records /// up to the one that is the lesser. -pub fn prune_older_than(first_to_keep: SessionIndex) { +pub fn prune_older_than(first_to_keep: SessionIndex) { let derived_key = shared::LAST_PRUNE.to_vec(); let entry = StorageValueRef::persistent(derived_key.as_ref()); match entry.mutate(|current: Option>| -> Result<_, ()> { @@ -127,7 +127,7 @@ pub fn prune_older_than(first_to_keep: SessionIndex) { } /// Keep the newest `n` items, and prune all items older than that. 
-pub fn keep_newest(n_to_keep: usize) { +pub fn keep_newest(n_to_keep: usize) { let session_index = >::current_index(); let n_to_keep = n_to_keep as SessionIndex; if n_to_keep < session_index { @@ -189,12 +189,12 @@ mod tests { #[test] fn encode_decode_roundtrip() { use codec::{Decode, Encode}; - use super::super::super::Trait as SessionTrait; - use super::super::Trait as HistoricalTrait; + use super::super::super::Config as SessionConfig; + use super::super::Config as HistoricalConfig; let sample = ( - 22u32 as ::ValidatorId, - 7_777_777 as ::FullIdentification); + 22u32 as ::ValidatorId, + 7_777_777 as ::FullIdentification); let encoded = sample.encode(); let decoded = Decode::decode(&mut encoded.as_slice()).expect("Must decode"); diff --git a/frame/session/src/historical/onchain.rs b/frame/session/src/historical/onchain.rs index 745603a49829be556cddd7f1fbfe244a1ea0a93d..1ee7ce4419df936a0cf3d46cb9b3df374e34f7ee 100644 --- a/frame/session/src/historical/onchain.rs +++ b/frame/session/src/historical/onchain.rs @@ -20,9 +20,9 @@ use codec::Encode; use sp_runtime::traits::Convert; -use super::super::Trait as SessionTrait; +use super::super::Config as SessionConfig; use super::super::{Module as SessionModule, SessionIndex}; -use super::Trait as HistoricalTrait; +use super::Config as HistoricalConfig; use super::shared; use sp_std::prelude::*; @@ -35,14 +35,14 @@ use sp_std::prelude::*; /// `on_initialize(..)` or `on_finalization(..)`. /// **Must** be called during the session, which validator-set is to be stored for further /// off-chain processing. Otherwise the `FullIdentification` might not be available. -pub fn store_session_validator_set_to_offchain( +pub fn store_session_validator_set_to_offchain( session_index: SessionIndex, ) { let encoded_validator_list = >::validators() .into_iter() - .filter_map(|validator_id: ::ValidatorId| { + .filter_map(|validator_id: ::ValidatorId| { let full_identification = - <::FullIdentificationOf>::convert(validator_id.clone()); + <::FullIdentificationOf>::convert(validator_id.clone()); full_identification.map(|full_identification| (validator_id, full_identification)) }) .collect::>(); @@ -55,8 +55,8 @@ pub fn store_session_validator_set_to_offchain() { +pub fn store_current_session_validator_set_to_offchain() { store_session_validator_set_to_offchain::(>::current_index()); } diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index c0a8fc29165bdae496e6d6fbf3315aeda21fc19c..dd176219aa7c1bb524638777987142be61316865 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -20,7 +20,7 @@ //! The Session module allows validators to manage their session keys, provides a function for changing //! the session length, and handles session rotation. //! -//! - [`session::Trait`](./trait.Trait.html) +//! - [`session::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! @@ -88,7 +88,7 @@ //! ``` //! use pallet_session as session; //! -//! fn validators() -> Vec<::ValidatorId> { +//! fn validators() -> Vec<::ValidatorId> { //! >::validators() //! } //! # fn main(){} @@ -346,15 +346,15 @@ impl SessionHandler for TestSessionHandler { fn on_disabled(_: usize) {} } -impl ValidatorRegistration for Module { +impl ValidatorRegistration for Module { fn is_registered(id: &T::ValidatorId) -> bool { Self::load_keys(id).is_some() } } -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. 
- type Event: From + Into<::Event>; + type Event: From + Into<::Event>; /// A stable ID for a validator. type ValidatorId: Member + Parameter; @@ -392,7 +392,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as Session { + trait Store for Module as Session { /// The current set of validators. Validators get(fn validators): Vec; @@ -483,7 +483,7 @@ decl_event!( decl_error! { /// Error for the session module. - pub enum Error for Module { + pub enum Error for Module { /// Invalid ownership proof. InvalidProof, /// No associated validator ID for account. @@ -496,7 +496,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; @@ -549,7 +549,7 @@ decl_module! { fn on_initialize(n: T::BlockNumber) -> Weight { if T::ShouldEndSession::should_end_session(n) { Self::rotate_session(); - T::MaximumBlockWeight::get() + T::BlockWeights::get().max_block } else { // NOTE: the non-database part of the weight for `should_end_session(n)` is // included as weight for empty block, the database part is expected to be in @@ -560,7 +560,7 @@ decl_module! { } } -impl Module { +impl Module { /// Move on to next session. Register new validator set and session keys. Changes /// to the validator set have a session of delay to take effect. This allows for /// equivocation punishment after a fork. @@ -683,6 +683,55 @@ impl Module { Self::validators().iter().position(|i| i == c).map(Self::disable_index).ok_or(()) } + /// Upgrade the key type from some old type to a new type. Supports adding + /// and removing key types. + /// + /// This function should be used with extreme care and only during an + /// `on_runtime_upgrade` block. Misuse of this function can put your blockchain + /// into an unrecoverable state. + /// + /// Care should be taken that the raw versions of the + /// added keys are unique for every `ValidatorId, KeyTypeId` combination. + /// This is an invariant that the session module typically maintains internally. + /// + /// As the actual values of the keys are typically not known at runtime upgrade, + /// it's recommended to initialize the keys to a (unique) dummy value with the expectation + /// that all validators should invoke `set_keys` before those keys are actually + /// required. + pub fn upgrade_keys(upgrade: F) where + Old: OpaqueKeys + Member + Decode, + F: Fn(T::ValidatorId, Old) -> T::Keys, + { + let old_ids = Old::key_ids(); + let new_ids = T::Keys::key_ids(); + + // Translate NextKeys, and key ownership relations at the same time. + >::translate::(|val, old_keys| { + // Clear all key ownership relations. Typically the overlap should + // stay the same, but no guarantees by the upgrade function. + for i in old_ids.iter() { + Self::clear_key_owner(*i, old_keys.get_raw(*i)); + } + + let new_keys = upgrade(val.clone(), old_keys); + + // And now set the new ones. + for i in new_ids.iter() { + Self::put_key_owner(*i, new_keys.get_raw(*i), &val); + } + + Some(new_keys) + }); + + let _ = >::translate::, _>( + |k| { + k.map(|k| k.into_iter() + .map(|(val, old_keys)| (val.clone(), upgrade(val, old_keys))) + .collect::>()) + } + ); + } + /// Perform the set_key operation, checking for duplicates. Does not set `Changed`. /// /// This ensures that the reference counter in system is incremented appropriately and as such @@ -776,7 +825,7 @@ impl Module { /// registering account-ID of that session key index. 
pub struct FindAccountFromAuthorIndex(sp_std::marker::PhantomData<(T, Inner)>); -impl> FindAuthor +impl> FindAuthor for FindAccountFromAuthorIndex { fn find_author<'a, I>(digests: I) -> Option @@ -789,7 +838,7 @@ impl> FindAuthor } } -impl EstimateNextNewSession for Module { +impl EstimateNextNewSession for Module { /// This session module always calls new_session and next_session at the same time, hence we /// do a simple proxy and pass the function to next rotation. fn estimate_next_new_session(now: T::BlockNumber) -> Option { diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index 1d787ac53b43854dee6f6ca34ace0df1172737c4..fa71859feb4012baf7d6567710f7ea7bce051b48 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -19,7 +19,7 @@ use super::*; use std::cell::RefCell; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; +use frame_support::{impl_outer_origin, parameter_types}; use sp_core::{crypto::key_types::DUMMY, H256}; use sp_runtime::{ Perbill, impl_opaque_keys, @@ -40,6 +40,31 @@ impl From for MockSessionKeys { } } +pub const KEY_ID_A: KeyTypeId = KeyTypeId([4; 4]); +pub const KEY_ID_B: KeyTypeId = KeyTypeId([9; 4]); + +#[derive(Debug, Clone, codec::Encode, codec::Decode, PartialEq, Eq)] +pub struct PreUpgradeMockSessionKeys { + pub a: [u8; 32], + pub b: [u8; 64], +} + +impl OpaqueKeys for PreUpgradeMockSessionKeys { + type KeyTypeIdProviders = (); + + fn key_ids() -> &'static [KeyTypeId] { + &[KEY_ID_A, KEY_ID_B] + } + + fn get_raw(&self, i: KeyTypeId) -> &[u8] { + match i { + i if i == KEY_ID_A => &self.a[..], + i if i == KEY_ID_B => &self.b[..], + _ => &[], + } + } +} + impl_outer_origin! { pub enum Origin for Test where system = frame_system {} } @@ -165,15 +190,17 @@ pub fn new_test_ext() -> sp_io::TestExternalities { pub struct Test; parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; pub const MinimumPeriod: u64 = 5; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -185,13 +212,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); type PalletInfo = (); type AccountData = (); @@ -200,7 +220,7 @@ impl frame_system::Trait for Test { type SystemWeightInfo = (); } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; @@ -211,7 +231,7 @@ parameter_types! 
{ pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); } -impl Trait for Test { +impl Config for Test { type ShouldEndSession = TestShouldEndSession; #[cfg(feature = "historical")] type SessionManager = crate::historical::NoteHistoricalRoot; @@ -228,7 +248,7 @@ impl Trait for Test { } #[cfg(feature = "historical")] -impl crate::historical::Trait for Test { +impl crate::historical::Config for Test { type FullIdentification = u64; type FullIdentificationOf = sp_runtime::traits::ConvertInto; } diff --git a/frame/session/src/tests.rs b/frame/session/src/tests.rs index 75def78046bebf97111feca0320a256d17bb3207..7a33aa5296bc81da805e781d39854c383a832cac 100644 --- a/frame/session/src/tests.rs +++ b/frame/session/src/tests.rs @@ -25,6 +25,7 @@ use mock::{ SESSION_CHANGED, TEST_SESSION_CHANGED, authorities, force_new_session, set_next_validators, set_session_length, session_changed, Origin, System, Session, reset_before_session_end_called, before_session_end_called, new_test_ext, + PreUpgradeMockSessionKeys, }; fn initialize_block(block: u64) { @@ -285,7 +286,7 @@ fn session_keys_generate_output_works_as_set_keys_input() { assert_ok!( Session::set_keys( Origin::signed(2), - ::Keys::decode(&mut &new_keys[..]).expect("Decode keys"), + ::Keys::decode(&mut &new_keys[..]).expect("Decode keys"), vec![], ) ); @@ -308,3 +309,97 @@ fn return_true_if_more_than_third_is_disabled() { assert_eq!(Session::disable_index(3), true); }); } + +#[test] +fn upgrade_keys() { + use frame_support::storage; + use mock::Test; + use sp_core::crypto::key_types::DUMMY; + + // This test assumes certain mocks. + assert_eq!(mock::NEXT_VALIDATORS.with(|l| l.borrow().clone()), vec![1, 2, 3]); + assert_eq!(mock::VALIDATORS.with(|l| l.borrow().clone()), vec![1, 2, 3]); + + new_test_ext().execute_with(|| { + let pre_one = PreUpgradeMockSessionKeys { + a: [1u8; 32], + b: [1u8; 64], + }; + + let pre_two = PreUpgradeMockSessionKeys { + a: [2u8; 32], + b: [2u8; 64], + }; + + let pre_three = PreUpgradeMockSessionKeys { + a: [3u8; 32], + b: [3u8; 64], + }; + + let val_keys = vec![ + (1u64, pre_one), + (2u64, pre_two), + (3u64, pre_three), + ]; + + // Set `QueuedKeys`. + { + let storage_key = >::hashed_key(); + assert!(storage::unhashed::exists(&storage_key)); + storage::unhashed::put(&storage_key, &val_keys); + } + + // Set `NextKeys`. + { + for &(i, ref keys) in val_keys.iter() { + let storage_key = >::hashed_key_for(i); + assert!(storage::unhashed::exists(&storage_key)); + storage::unhashed::put(&storage_key, keys); + } + } + + // Set `KeyOwner`. + { + for &(i, ref keys) in val_keys.iter() { + // clear key owner for `UintAuthorityId` keys set in genesis. + let presumed = UintAuthorityId(i); + let raw_prev = presumed.as_ref(); + + assert_eq!(Session::key_owner(DUMMY, raw_prev), Some(i)); + Session::clear_key_owner(DUMMY, raw_prev); + + Session::put_key_owner(mock::KEY_ID_A, keys.get_raw(mock::KEY_ID_A), &i); + Session::put_key_owner(mock::KEY_ID_B, keys.get_raw(mock::KEY_ID_B), &i); + } + } + + // Do the upgrade and check sanity. + let mock_keys_for = |val| mock::MockSessionKeys { dummy: UintAuthorityId(val) }; + Session::upgrade_keys::( + |val, _old_keys| mock_keys_for(val), + ); + + // Check key ownership. 
+ for (i, ref keys) in val_keys.iter() { + assert!(Session::key_owner(mock::KEY_ID_A, keys.get_raw(mock::KEY_ID_A)).is_none()); + assert!(Session::key_owner(mock::KEY_ID_B, keys.get_raw(mock::KEY_ID_B)).is_none()); + + let migrated_key = UintAuthorityId(*i); + assert_eq!(Session::key_owner(DUMMY, migrated_key.as_ref()), Some(*i)); + } + + // Check queued keys. + assert_eq!( + Session::queued_keys(), + vec![ + (1, mock_keys_for(1)), + (2, mock_keys_for(2)), + (3, mock_keys_for(3)), + ], + ); + + for i in 1u64..4 { + assert_eq!(>::get(&i), Some(mock_keys_for(i))); + } + }) +} diff --git a/frame/session/src/weights.rs b/frame/session/src/weights.rs index f1fc18b0ef99819ad2f8dc964e25f9a4c6462e7e..243ddc04b085fb734f41930b6b5041c334c14b41 100644 --- a/frame/session/src/weights.rs +++ b/frame/session/src/weights.rs @@ -50,7 +50,7 @@ pub trait WeightInfo { /// Weights for pallet_session using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn set_keys() -> Weight { (86_033_000 as Weight) .saturating_add(T::DbWeight::get().reads(6 as Weight)) diff --git a/frame/society/README.md b/frame/society/README.md index 372dfe1f048ef15da5787c08c9ca825e1592e5c7..a25940f636de9606a5604d0a6f67b5719bc054c0 100644 --- a/frame/society/README.md +++ b/frame/society/README.md @@ -225,4 +225,4 @@ make judgement on a suspended candidate. * `set_max_membership` - The ROOT origin can update the maximum member count for the society. The max membership count must be greater than 1. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index cbfe5a00de240ea8ce8217dbbfb26ac8d1d1b932..6fe8a2673b21be509eae30f8783aab469e422ee7 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -17,7 +17,7 @@ //! # Society Module //! -//! - [`society::Trait`](./trait.Trait.html) +//! - [`society::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -268,13 +268,13 @@ use frame_support::traits::{ }; use frame_system::{self as system, ensure_signed, ensure_root}; -type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; /// The module's configuration trait. -pub trait Trait: system::Trait { +pub trait Config: system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The societies's module id type ModuleId: Get; @@ -403,7 +403,7 @@ impl BidKind { // This module's storage items. decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Society { + trait Store for Module, I: Instance=DefaultInstance> as Society { /// The first member. pub Founder get(fn founder) build(|config: &GenesisConfig| config.members.first().cloned()): Option; @@ -472,7 +472,7 @@ decl_storage! { // The module's dispatchable functions. decl_module! { /// The module declaration. - pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: T::Origin { + pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: T::Origin { type Error = Error; /// The minimum amount of a deposit required for a bid to be made. 
const CandidateDeposit: BalanceOf = T::CandidateDeposit::get(); @@ -533,7 +533,7 @@ decl_module! { /// /// Total Complexity: O(M + B + C + logM + logB + X) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn bid(origin, value: BalanceOf) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(!>::contains_key(&who), Error::::Suspended); @@ -572,7 +572,7 @@ decl_module! { /// /// Total Complexity: O(B + X) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn unbid(origin, pos: u32) -> DispatchResult { let who = ensure_signed(origin)?; @@ -642,7 +642,7 @@ decl_module! { /// /// Total Complexity: O(M + B + C + logM + logB + X) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn vouch(origin, who: T::AccountId, value: BalanceOf, tip: BalanceOf) -> DispatchResult { let voucher = ensure_signed(origin)?; // Check user is not suspended. @@ -683,7 +683,7 @@ decl_module! { /// /// Total Complexity: O(B) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn unvouch(origin, pos: u32) -> DispatchResult { let voucher = ensure_signed(origin)?; ensure!(Self::vouching(&voucher) == Some(VouchingStatus::Vouching), Error::::NotVouching); @@ -721,7 +721,7 @@ decl_module! { /// /// Total Complexity: O(M + logM + C) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn vote(origin, candidate: ::Source, approve: bool) { let voter = ensure_signed(origin)?; let candidate = T::Lookup::lookup(candidate)?; @@ -752,7 +752,7 @@ decl_module! { /// /// Total Complexity: O(M + logM) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn defender_vote(origin, approve: bool) { let voter = ensure_signed(origin)?; let members = >::get(); @@ -784,7 +784,7 @@ decl_module! { /// /// Total Complexity: O(M + logM + P + X) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn payout(origin) { let who = ensure_signed(origin)?; @@ -826,7 +826,7 @@ decl_module! { /// /// Total Complexity: O(1) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] fn found(origin, founder: T::AccountId, max_members: u32, rules: Vec) { T::FounderSetOrigin::ensure_origin(origin)?; ensure!(!>::exists(), Error::::AlreadyFounded); @@ -853,7 +853,7 @@ decl_module! { /// /// Total Complexity: O(1) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] fn unfound(origin) { let founder = ensure_signed(origin)?; ensure!(Founder::::get() == Some(founder.clone()), Error::::NotFounder); @@ -895,7 +895,7 @@ decl_module! { /// /// Total Complexity: O(M + logM + B) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] fn judge_suspended_member(origin, who: T::AccountId, forgive: bool) { T::SuspensionJudgementOrigin::ensure_origin(origin)?; ensure!(>::contains_key(&who), Error::::NotSuspended); @@ -966,7 +966,7 @@ decl_module! 
{ /// /// Total Complexity: O(M + logM + B + X) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] fn judge_suspended_candidate(origin, who: T::AccountId, judgement: Judgement) { T::SuspensionJudgementOrigin::ensure_origin(origin)?; if let Some((value, kind)) = >::get(&who) { @@ -1026,7 +1026,7 @@ decl_module! { /// /// Total Complexity: O(1) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] fn set_max_members(origin, max: u32) { ensure_root(origin)?; ensure!(max > 1, Error::::MaxMembers); @@ -1038,13 +1038,14 @@ decl_module! { let mut members = vec![]; let mut weight = 0; + let weights = T::BlockWeights::get(); // Run a candidate/membership rotation if (n % T::RotationPeriod::get()).is_zero() { members = >::get(); Self::rotate_period(&mut members); - weight += T::MaximumBlockWeight::get() / 20; + weight += weights.max_block / 20; } // Run a challenge rotation @@ -1055,7 +1056,7 @@ decl_module! { } Self::rotate_challenge(&mut members); - weight += T::MaximumBlockWeight::get() / 20; + weight += weights.max_block / 20; } weight @@ -1065,7 +1066,7 @@ decl_module! { decl_error! { /// Errors for this module. - pub enum Error for Module, I: Instance> { + pub enum Error for Module, I: Instance> { /// An incorrect position was provided. BadPosition, /// User is not a member. @@ -1108,7 +1109,7 @@ decl_error! { decl_event! { /// Events for this module. pub enum Event where - AccountId = ::AccountId, + AccountId = ::AccountId, Balance = BalanceOf { /// The society is founded by the given identity. \[founder\] @@ -1151,7 +1152,7 @@ decl_event! { /// Simple ensure origin struct to filter for the founder account. pub struct EnsureFounder(sp_std::marker::PhantomData); -impl EnsureOrigin for EnsureFounder { +impl EnsureOrigin for EnsureFounder { type Success = T::AccountId; fn try_origin(o: T::Origin) -> Result { o.into().and_then(|o| match (o, Founder::::get()) { @@ -1182,7 +1183,7 @@ fn pick_usize<'a, R: RngCore>(rng: &mut R, max: usize) -> usize { (rng.next_u32() % (max as u32 + 1)) as usize } -impl, I: Instance> Module { +impl, I: Instance> Module { /// Puts a bid into storage ordered by smallest to largest value. /// Allows a maximum of 1000 bids in queue, removing largest value people first. fn put_bid( @@ -1669,7 +1670,7 @@ impl, I: Instance> Module { } } -impl OnUnbalanced> for Module { +impl OnUnbalanced> for Module { fn on_nonzero_unbalanced(amount: NegativeImbalanceOf) { let numeric_amount = amount.peek(); diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index 212bcfd404ff152dc47be29bb063e891f91910bd..6a718c21850736266f959e8d59cba5d28a4c1bd2 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -25,7 +25,6 @@ use frame_support::{ }; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; @@ -45,14 +44,11 @@ parameter_types! { pub const PeriodSpend: u64 = 1000; pub const MaxLockDuration: u64 = 100; pub const ChallengePeriod: u64 = 8; - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: u32 = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const ExistentialDeposit: u64 = 1; pub const SocietyModuleId: ModuleId = ModuleId(*b"py/socie"); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } ord_parameter_types! { @@ -60,8 +56,11 @@ ord_parameter_types! 
{ pub const SuspensionJudgementSetAccount: u128 = 2; } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -73,13 +72,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type OnNewAccount = (); @@ -88,7 +80,7 @@ impl frame_system::Trait for Test { type SystemWeightInfo = (); } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type Event = (); @@ -98,7 +90,7 @@ impl pallet_balances::Trait for Test { type WeightInfo = (); } -impl Trait for Test { +impl Config for Test { type Event = (); type Currency = pallet_balances::Module; type Randomness = TestRandomness; diff --git a/frame/staking/README.md b/frame/staking/README.md index 78474ee842216eb436976901e7dc95daf162c7cd..a379d0a7ad5e28a82005a62bac597940e69eaefa 100644 --- a/frame/staking/README.md +++ b/frame/staking/README.md @@ -90,7 +90,7 @@ valid behavior_ while _punishing any misbehavior or lack of availability_. Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the `payout_stakers` call. Any account can call `payout_stakers`, which pays the reward to the -validator as well as its nominators. Only the [`Trait::MaxNominatorRewardedPerValidator`] +validator as well as its nominators. Only the [`Config::MaxNominatorRewardedPerValidator`] biggest stakers can claim their reward. This is to limit the i/o cost to mutate storage for each nominator's account. @@ -137,10 +137,10 @@ use frame_support::{decl_module, dispatch}; use frame_system::ensure_signed; use pallet_staking::{self as staking}; -pub trait Trait: staking::Trait {} +pub trait Config: staking::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// Reward a validator. #[weight = 0] pub fn reward_myself(origin) -> dispatch::DispatchResult { @@ -176,7 +176,7 @@ Validators and nominators are rewarded at the end of each era. The total reward calculated using the era duration and the staking rate (the total amount of tokens staked by nominators and validators, divided by the total token supply). It aims to incentivize toward a defined staking rate. The full specification can be found -[here](https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model). +[here](https://research.web3.foundation/en/latest/polkadot/economics/1-token-economics.html#inflation-model). Total reward is split among validators and their nominators depending on the number of points they received during the era. Points are added to a validator using @@ -246,4 +246,4 @@ The Staking module depends on the [`GenesisConfig`](https://docs.rs/pallet-staki - [Session](https://docs.rs/pallet-session/latest/pallet_session/): Used to manage sessions. Also, a list of new validators is stored in the Session module's `Validators` at the end of each era. 
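The society dispatchables above and the staking benchmarks that follow replace every `T::MaximumBlockWeight::get()` read with the new limits struct. A minimal sketch of the pattern, assuming only a `frame_system::Config` bound (`tenth_of_block` is an illustrative helper name, not part of the patch):

```rust
use frame_support::{traits::Get, weights::Weight};

/// Mirrors the `max_block / 10` heuristic used by the society pallet's
/// `#[weight = ...]` annotations above.
fn tenth_of_block<T: frame_system::Config>() -> Weight {
    <T as frame_system::Config>::BlockWeights::get().max_block / 10
}
```

The same `BlockWeights::get().max_block` read also replaces `MaximumBlockWeight::get()` in the society pallet's `on_initialize` weight accounting and in the staking benchmarks' call to `offchain_election::prepare_submission`.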
-License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/staking/fuzzer/src/mock.rs b/frame/staking/fuzzer/src/mock.rs index 96df7674e9f44b4e7615588ecd118232682e065e..6f58d6a669d7c6c0088d01038d5119f47602bb62 100644 --- a/frame/staking/fuzzer/src/mock.rs +++ b/frame/staking/fuzzer/src/mock.rs @@ -43,13 +43,12 @@ impl_outer_dispatch! { #[derive(Clone, Eq, PartialEq, Debug)] pub struct Test; -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); - type Origin = Origin; + type BlockWeights = (); + type BlockLength = (); type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = (); + type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; type Call = Call; @@ -60,9 +59,6 @@ impl frame_system::Trait for Test { type Header = sp_runtime::testing::Header; type Event = (); type BlockHashCount = (); - type MaximumBlockWeight = (); - type AvailableBlockRatio = (); - type MaximumBlockLength = (); type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -73,7 +69,7 @@ impl frame_system::Trait for Test { parameter_types! { pub const ExistentialDeposit: Balance = 10; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = Balance; type Event = (); @@ -82,7 +78,7 @@ impl pallet_balances::Trait for Test { type AccountStore = System; type WeightInfo = (); } -impl pallet_indices::Trait for Test { +impl pallet_indices::Config for Test { type AccountIndex = AccountIndex; type Event = (); type Currency = Balances; @@ -92,13 +88,13 @@ impl pallet_indices::Trait for Test { parameter_types! { pub const MinimumPeriod: u64 = 5; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -124,7 +120,7 @@ impl pallet_session::SessionHandler for TestSessionHandler { fn on_disabled(_: usize) {} } -impl pallet_session::Trait for Test { +impl pallet_session::Config for Test { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type Keys = SessionKeys; type ShouldEndSession = pallet_session::PeriodicSessions<(), ()>; @@ -161,7 +157,7 @@ impl frame_system::offchain::SendTransactionTypes for Test where type Extrinsic = Extrinsic; } -impl pallet_staking::Trait for Test { +impl pallet_staking::Config for Test { type Currency = Balances; type UnixTime = pallet_timestamp::Module; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 94a97debe4ff3844e57eeae3960771235d97afe9..d336bfd1ddda50740445e758afbc2e6c2978d34c 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -31,7 +31,7 @@ const MAX_SLASHES: u32 = 1000; // Add slashing spans to a user account. Not relevant for actual use, only to benchmark // read and write operations. 
-fn add_slashing_spans(who: &T::AccountId, spans: u32) { +fn add_slashing_spans(who: &T::AccountId, spans: u32) { if spans == 0 { return } // For the first slashing span, we initialize @@ -48,7 +48,7 @@ fn add_slashing_spans(who: &T::AccountId, spans: u32) { // This function clears all existing validators and nominators from the set, and generates one new // validator being nominated by n nominators, and returns the validator stash account and the // nominators' stash and controller. It also starts an era and creates pending payouts. -pub fn create_validator_with_nominators( +pub fn create_validator_with_nominators( n: u32, upper_bound: u32, dead: bool, @@ -521,7 +521,12 @@ benchmarks! { compact, score, size - ) = offchain_election::prepare_submission::(assignments, winners, false, T::MaximumBlockWeight::get()).unwrap(); + ) = offchain_election::prepare_submission::( + assignments, + winners, + false, + T::BlockWeights::get().max_block, + ).unwrap(); assert_eq!( winners.len(), compact.unique_targets().len(), @@ -589,7 +594,12 @@ benchmarks! { compact, score, size - ) = offchain_election::prepare_submission::(assignments, winners, false, T::MaximumBlockWeight::get()).unwrap(); + ) = offchain_election::prepare_submission::( + assignments, + winners, + false, + T::BlockWeights::get().max_block, + ).unwrap(); assert_eq!( winners.len(), compact.unique_targets().len(), @@ -729,7 +739,7 @@ mod tests { let (validator_stash, nominators) = create_validator_with_nominators::( n, - ::MaxNominatorRewardedPerValidator::get() as u32, + ::MaxNominatorRewardedPerValidator::get() as u32, false, RewardDestination::Staked, ).unwrap(); @@ -753,7 +763,7 @@ mod tests { let (validator_stash, _nominators) = create_validator_with_nominators::( n, - ::MaxNominatorRewardedPerValidator::get() as u32, + ::MaxNominatorRewardedPerValidator::get() as u32, false, RewardDestination::Staked, ).unwrap(); diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index e5aaae6bbb8f9ae362d06be16758a2c7b0b1a435..af326e27c62a8a58da39ba7dda00c69e252420bc 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -19,7 +19,7 @@ //! //! The Staking module is used to manage funds at stake by network maintainers. //! -//! - [`staking::Trait`](./trait.Trait.html) +//! - [`staking::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! @@ -107,7 +107,7 @@ //! //! Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the //! `payout_stakers` call. Any account can call `payout_stakers`, which pays the reward to the -//! validator as well as its nominators. Only the [`Trait::MaxNominatorRewardedPerValidator`] +//! validator as well as its nominators. Only the [`Config::MaxNominatorRewardedPerValidator`] //! biggest stakers can claim their reward. This is to limit the i/o cost to mutate storage for each //! nominator's account. //! @@ -154,10 +154,10 @@ //! use frame_system::ensure_signed; //! use pallet_staking::{self as staking}; //! -//! pub trait Trait: staking::Trait {} +//! pub trait Config: staking::Config {} //! //! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! /// Reward a validator. //! #[weight = 0] //! pub fn reward_myself(origin) -> dispatch::DispatchResult { @@ -175,7 +175,7 @@ //! ### Era payout //! //! The era payout is computed using yearly inflation curve defined at -//! 
[`T::RewardCurve`](./trait.Trait.html#associatedtype.RewardCurve) as such: +//! [`T::RewardCurve`](./trait.Config.html#associatedtype.RewardCurve) as such: //! //! ```nocompile //! staker_payout = yearly_inflation(npos_token_staked / total_tokens) * total_tokens / era_per_year @@ -186,7 +186,7 @@ //! remaining_payout = max_yearly_inflation * total_tokens / era_per_year - staker_payout //! ``` //! The remaining reward is send to the configurable end-point -//! [`T::RewardRemainder`](./trait.Trait.html#associatedtype.RewardRemainder). +//! [`T::RewardRemainder`](./trait.Config.html#associatedtype.RewardRemainder). //! //! ### Reward Calculation //! @@ -232,7 +232,7 @@ //! //! The controller account can free a portion (or all) of the funds using the //! [`unbond`](enum.Call.html#variant.unbond) call. Note that the funds are not immediately -//! accessible. Instead, a duration denoted by [`BondingDuration`](./trait.Trait.html#associatedtype.BondingDuration) +//! accessible. Instead, a duration denoted by [`BondingDuration`](./trait.Config.html#associatedtype.BondingDuration) //! (in number of eras) must pass until the funds can actually be removed. Once the //! `BondingDuration` is over, the [`withdraw_unbonded`](./enum.Call.html#variant.withdraw_unbonded) //! call can be used to actually withdraw the funds. @@ -385,12 +385,12 @@ pub type OffchainAccuracy = PerU16; /// The balance type of this module. pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Currency<::AccountId>>::Balance; type PositiveImbalanceOf = - <::Currency as Currency<::AccountId>>::PositiveImbalance; + <::Currency as Currency<::AccountId>>::PositiveImbalance; type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; /// Information regarding the active era (era in used in session). #[derive(Encode, Decode, RuntimeDebug)] @@ -732,8 +732,8 @@ impl Default for ElectionStatus { /// Means for interacting with a specialized version of the `session` trait. /// -/// This is needed because `Staking` sets the `ValidatorIdOf` of the `pallet_session::Trait` -pub trait SessionInterface: frame_system::Trait { +/// This is needed because `Staking` sets the `ValidatorIdOf` of the `pallet_session::Config` +pub trait SessionInterface: frame_system::Config { /// Disable a given validator by stash ID. /// /// Returns `true` if new era should be forced at the end of this session. 
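The doc hunk above states the era payout formulas. A small numeric sketch of that split, using `f64` and a toy linear inflation curve purely for illustration (the runtime uses a fixed-point piecewise NPoS curve and its configured era length):

```rust
// Numeric sketch of the era payout split described above, using f64 and a toy
// linear "inflation curve"; the runtime's curve and era length differ.
fn yearly_inflation(staking_rate: f64) -> f64 {
    // Toy curve for illustration only: 10% at 0% staked, 2.5% at 100% staked.
    0.10 - 0.075 * staking_rate
}

const MAX_YEARLY_INFLATION: f64 = 0.10;
const ERAS_PER_YEAR: f64 = 365.0; // assume one era per day

/// Returns (payout to stakers, remainder sent to the RewardRemainder handler).
fn era_payout(staked: f64, total_issuance: f64) -> (f64, f64) {
    let staking_rate = staked / total_issuance;
    let staker_payout = yearly_inflation(staking_rate) * total_issuance / ERAS_PER_YEAR;
    let max_payout = MAX_YEARLY_INFLATION * total_issuance / ERAS_PER_YEAR;
    // Whatever is not paid to stakers goes to the RewardRemainder end-point.
    let remainder = max_payout - staker_payout;
    (staker_payout, remainder)
}

fn main() {
    let (to_stakers, remainder) = era_payout(500_000.0, 1_000_000.0);
    // 50% staked -> 6.25% yearly inflation for stakers in this toy curve.
    assert!((to_stakers - 0.0625 * 1_000_000.0 / 365.0).abs() < 1e-6);
    assert!(remainder > 0.0);
}
```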
@@ -746,22 +746,22 @@ pub trait SessionInterface: frame_system::Trait { fn prune_historical_up_to(up_to: SessionIndex); } -impl SessionInterface<::AccountId> for T where - T: pallet_session::Trait::AccountId>, - T: pallet_session::historical::Trait< - FullIdentification = Exposure<::AccountId, BalanceOf>, +impl SessionInterface<::AccountId> for T where + T: pallet_session::Config::AccountId>, + T: pallet_session::historical::Config< + FullIdentification = Exposure<::AccountId, BalanceOf>, FullIdentificationOf = ExposureOf, >, - T::SessionHandler: pallet_session::SessionHandler<::AccountId>, - T::SessionManager: pallet_session::SessionManager<::AccountId>, + T::SessionHandler: pallet_session::SessionHandler<::AccountId>, + T::SessionManager: pallet_session::SessionManager<::AccountId>, T::ValidatorIdOf: - Convert<::AccountId, Option<::AccountId>>, + Convert<::AccountId, Option<::AccountId>>, { - fn disable_validator(validator: &::AccountId) -> Result { + fn disable_validator(validator: &::AccountId) -> Result { >::disable(validator) } - fn validators() -> Vec<::AccountId> { + fn validators() -> Vec<::AccountId> { >::validators() } @@ -770,7 +770,7 @@ impl SessionInterface<::AccountId> for T whe } } -pub trait Trait: frame_system::Trait + SendTransactionTypes> { +pub trait Config: frame_system::Config + SendTransactionTypes> { /// The staking balance. type Currency: LockableCurrency; @@ -792,7 +792,7 @@ pub trait Trait: frame_system::Trait + SendTransactionTypes> { type RewardRemainder: OnUnbalanced>; /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// Handler for the unbalanced reduction when slashing a staker. type Slash: OnUnbalanced>; @@ -904,7 +904,7 @@ impl Default for Releases { } decl_storage! { - trait Store for Module as Staking { + trait Store for Module as Staking { /// Number of eras to keep in history. /// /// Information is kept for eras in `[current_era - history_depth; current_era]`. @@ -1121,7 +1121,7 @@ decl_storage! { } decl_event!( - pub enum Event where Balance = BalanceOf, ::AccountId { + pub enum Event where Balance = BalanceOf, ::AccountId { /// The era payout has been set; the first balance is the validator-payout; the second is /// the remainder from the maximum amount of reward. /// \[era_index, validator_payout, remainder\] @@ -1153,7 +1153,7 @@ decl_event!( decl_error! { /// Error for the staking module. - pub enum Error for Module { + pub enum Error for Module { /// Not a controller account. NotController, /// Not a stash account. @@ -1223,7 +1223,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// Number of sessions per era. const SessionsPerEra: SessionIndex = T::SessionsPerEra::get(); @@ -1474,11 +1474,13 @@ decl_module! { let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash_balance = T::Currency::free_balance(&stash); - if let Some(extra) = stash_balance.checked_sub(&ledger.total) { let extra = extra.min(max_additional); ledger.total += extra; ledger.active += extra; + // last check: the new active amount of ledger must be more than ED. + ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientValue); + Self::deposit_event(RawEvent::Bonded(stash, extra)); Self::update_ledger(&controller, &ledger); } @@ -1586,7 +1588,7 @@ decl_module! 
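The hunk above adds the new rule that `bond_extra` (and, further down, `rebond`) must not leave the ledger's active stake below the existential deposit, failing with `InsufficientValue` otherwise. A standalone sketch of that floor check on a simplified ledger; the ED value and types are illustrative:

```rust
// Simplified ledger sketch showing the new "active stake must stay >= ED" check.
// The ED value and the types are illustrative, not the runtime's.
const EXISTENTIAL_DEPOSIT: u128 = 10;

#[derive(Debug, PartialEq)]
enum StakingError {
    InsufficientValue,
}

#[derive(Debug)]
struct Ledger {
    total: u128,
    active: u128,
}

impl Ledger {
    /// Bond `extra` more from free balance, but never leave `active` below the
    /// existential deposit.
    fn bond_extra(&mut self, extra: u128) -> Result<(), StakingError> {
        let new_active = self.active + extra;
        if new_active < EXISTENTIAL_DEPOSIT {
            return Err(StakingError::InsufficientValue);
        }
        self.active = new_active;
        self.total += extra;
        Ok(())
    }
}

fn main() {
    // Fully unbonded ledger: topping it up with a dust amount is rejected.
    let mut ledger = Ledger { total: 1000, active: 0 };
    assert_eq!(ledger.bond_extra(5), Err(StakingError::InsufficientValue));
    // A top-up that clears the ED floor is accepted.
    assert_eq!(ledger.bond_extra(10), Ok(()));
    assert_eq!(ledger.active, 10);
}
```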
{ ledger = ledger.consolidate_unlocked(current_era) } - let post_info_weight = if ledger.unlocking.is_empty() && ledger.active.is_zero() { + let post_info_weight = if ledger.unlocking.is_empty() && ledger.active <= T::Currency::minimum_balance() { // This account must have called `unbond()` with some value that caused the active // portion to fall below existential deposit + will have no more unlocking chunks // left. We can now safely remove all staking-related information. @@ -1973,6 +1975,9 @@ decl_module! { ensure!(!ledger.unlocking.is_empty(), Error::::NoUnlockChunk); let ledger = ledger.rebond(value); + // last check: the new active amount of ledger must be more than ED. + ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientValue); + Self::update_ledger(&controller, &ledger); Ok(Some( 35 * WEIGHT_PER_MICROS @@ -2159,14 +2164,14 @@ decl_module! { } } -impl Module { +impl Module { /// The total balance that can be slashed from a stash account as of right now. pub fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf { // Weight note: consider making the stake accessible through stash. Self::bonded(stash).and_then(Self::ledger).map(|l| l.active).unwrap_or_default() } - /// Internal impl of [`slashable_balance_of`] that returns [`VoteWeight`]. + /// Internal impl of [`Self::slashable_balance_of`] that returns [`VoteWeight`]. pub fn slashable_balance_of_vote_weight(stash: &T::AccountId, issuance: BalanceOf) -> VoteWeight { T::CurrencyToVote::to_vote(Self::slashable_balance_of(stash), issuance) } @@ -2833,7 +2838,7 @@ impl Module { /// Execute election and return the new results. The edge weights are processed into support /// values. /// - /// This is basically a wrapper around [`do_phragmen`] which translates + /// This is basically a wrapper around [`Self::do_phragmen`] which translates /// `PrimitiveElectionResult` into `ElectionResult`. /// /// No storage item is updated. @@ -3083,7 +3088,7 @@ impl Module { /// /// Once the first new_session is planned, all session must start and then end in order, though /// some session can lag in between the newest session planned and the latest session started. -impl pallet_session::SessionManager for Module { +impl pallet_session::SessionManager for Module { fn new_session(new_index: SessionIndex) -> Option> { Self::new_session(new_index) } @@ -3095,7 +3100,7 @@ impl pallet_session::SessionManager for Module { } } -impl historical::SessionManager>> for Module { +impl historical::SessionManager>> for Module { fn new_session(new_index: SessionIndex) -> Option>)>> { @@ -3124,7 +3129,7 @@ impl historical::SessionManager pallet_authorship::EventHandler for Module where - T: Trait + pallet_authorship::Trait + pallet_session::Trait + T: Config + pallet_authorship::Config + pallet_session::Config { fn note_author(author: T::AccountId) { Self::reward_by_ids(vec![(author, 20)]) @@ -3141,7 +3146,7 @@ impl pallet_authorship::EventHandler for Module /// if any. pub struct StashOf(sp_std::marker::PhantomData); -impl Convert> for StashOf { +impl Convert> for StashOf { fn convert(controller: T::AccountId) -> Option { >::ledger(&controller).map(|l| l.stash) } @@ -3154,7 +3159,7 @@ impl Convert> for StashOf { /// `active_era`. It can differ from the latest planned exposure in `current_era`. 
pub struct ExposureOf(sp_std::marker::PhantomData); -impl Convert>>> +impl Convert>>> for ExposureOf { fn convert(validator: T::AccountId) -> Option>> { @@ -3167,19 +3172,19 @@ impl Convert> } /// This is intended to be used with `FilterHistoricalOffences`. -impl +impl OnOffenceHandler, Weight> for Module where - T: pallet_session::Trait::AccountId>, - T: pallet_session::historical::Trait< - FullIdentification = Exposure<::AccountId, BalanceOf>, + T: pallet_session::Config::AccountId>, + T: pallet_session::historical::Config< + FullIdentification = Exposure<::AccountId, BalanceOf>, FullIdentificationOf = ExposureOf, >, - T::SessionHandler: pallet_session::SessionHandler<::AccountId>, - T::SessionManager: pallet_session::SessionManager<::AccountId>, + T::SessionHandler: pallet_session::SessionHandler<::AccountId>, + T::SessionManager: pallet_session::SessionManager<::AccountId>, T::ValidatorIdOf: Convert< - ::AccountId, - Option<::AccountId>, + ::AccountId, + Option<::AccountId>, >, { fn on_offence( @@ -3310,7 +3315,7 @@ pub struct FilterHistoricalOffences { impl ReportOffence for FilterHistoricalOffences, R> where - T: Trait, + T: Config, R: ReportOffence, O: Offence, { @@ -3335,7 +3340,7 @@ impl ReportOffence } #[allow(deprecated)] -impl frame_support::unsigned::ValidateUnsigned for Module { +impl frame_support::unsigned::ValidateUnsigned for Module { type Call = Call; fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { if let Call::submit_election_solution_unsigned( diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 3aa3e9ae03d746a415fc3fef0ae2cd6f38d1b0d3..76689503f65ab8965c112fc19771b32352f0f254 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -129,9 +129,10 @@ pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = frame_support::weights::constants::WEIGHT_PER_SECOND * 2; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::constants::WEIGHT_PER_SECOND * 2 + ); pub const MaxLocks: u32 = 1024; pub static SessionsPerEra: SessionIndex = 3; pub static ExistentialDeposit: Balance = 0; @@ -141,8 +142,11 @@ parameter_types! { pub static MaxIterations: u32 = 0; } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = RocksDbWeight; type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; @@ -154,13 +158,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = MetaEvent; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = RocksDbWeight; - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -168,7 +165,7 @@ impl frame_system::Trait for Test { type OnKilledAccount = (); type SystemWeightInfo = (); } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = MaxLocks; type Balance = Balance; type Event = MetaEvent; @@ -187,7 +184,7 @@ sp_runtime::impl_opaque_keys! 
{ pub other: OtherSessionHandler, } } -impl pallet_session::Trait for Test { +impl pallet_session::Config for Test { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type Keys = SessionKeys; type ShouldEndSession = pallet_session::PeriodicSessions; @@ -200,11 +197,11 @@ impl pallet_session::Trait for Test { type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = crate::Exposure; type FullIdentificationOf = crate::ExposureOf; } -impl pallet_authorship::Trait for Test { +impl pallet_authorship::Config for Test { type FindAuthor = Author11; type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -213,7 +210,7 @@ impl pallet_authorship::Trait for Test { parameter_types! { pub const MinimumPeriod: u64 = 5; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; @@ -235,7 +232,7 @@ parameter_types! { pub const MaxNominatorRewardedPerValidator: u32 = 64; pub const UnsignedPriority: u64 = 1 << 20; pub const MinSolutionScoreBump: Perbill = Perbill::zero(); - pub const OffchainSolutionWeightLimit: Weight = MaximumBlockWeight::get(); + pub OffchainSolutionWeightLimit: Weight = BlockWeights::get().max_block; } thread_local! { @@ -253,7 +250,7 @@ impl OnUnbalanced> for RewardRemainderMock { } } -impl Trait for Test { +impl Config for Test { type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; @@ -582,8 +579,18 @@ fn assert_is_stash(acc: AccountId) { fn assert_ledger_consistent(ctrl: AccountId) { // ensures ledger.total == ledger.active + sum(ledger.unlocking). 
let ledger = Staking::ledger(ctrl).expect("Not a controller."); - let real_total: Balance = ledger.unlocking.iter().fold(ledger.active, |a, c| a + c.value); + let real_total: Balance = ledger + .unlocking + .iter() + .fold(ledger.active, |a, c| a + c.value); assert_eq!(real_total, ledger.total); + assert!( + ledger.active >= Balances::minimum_balance() || ledger.active == 0, + "{}: active ledger amount ({}) must be greater than ED {}", + ctrl, + ledger.active, + Balances::minimum_balance() + ); } pub(crate) fn bond_validator(stash: AccountId, ctrl: AccountId, val: Balance) { @@ -659,7 +666,7 @@ pub(crate) fn start_era(era_index: EraIndex) { pub(crate) fn current_total_payout_for_duration(duration: u64) -> Balance { inflation::compute_total_payout( - ::RewardCurve::get(), + ::RewardCurve::get(), Staking::eras_total_stake(Staking::active_era().unwrap().index), Balances::total_issuance(), duration, @@ -667,7 +674,7 @@ pub(crate) fn current_total_payout_for_duration(duration: u64) -> Balance { } pub(crate) fn reward_all_elected() { - let rewards = ::SessionInterface::validators() + let rewards = ::SessionInterface::validators() .into_iter() .map(|v| (v, 1)); diff --git a/frame/staking/src/offchain_election.rs b/frame/staking/src/offchain_election.rs index cb4d460f68035b4b3a506ebf140abc2078b84200..35d9fa7c1f850a40defc4eb20d2f10f303cc9fb7 100644 --- a/frame/staking/src/offchain_election.rs +++ b/frame/staking/src/offchain_election.rs @@ -19,7 +19,7 @@ use crate::{ Call, CompactAssignments, ElectionSize, Module, NominatorIndex, Nominators, OffchainAccuracy, - Trait, ValidatorIndex, WeightInfo, + Config, ValidatorIndex, WeightInfo, }; use codec::Decode; use frame_support::{traits::Get, weights::Weight, IterableStorageMap}; @@ -71,7 +71,7 @@ pub(crate) const DEFAULT_LONGEVITY: u64 = 25; /// don't run twice within a window of length [`OFFCHAIN_REPEAT`]. /// /// Returns `Ok(())` if offchain worker should happen, `Err(reason)` otherwise. -pub(crate) fn set_check_offchain_execution_status( +pub(crate) fn set_check_offchain_execution_status( now: T::BlockNumber, ) -> Result<(), &'static str> { let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); @@ -108,7 +108,7 @@ pub(crate) fn set_check_offchain_execution_status( /// The internal logic of the offchain worker of this module. This runs the phragmen election, /// compacts and reduces the solution, computes the score and submits it back to the chain as an /// unsigned transaction, without any signature. -pub(crate) fn compute_offchain_election() -> Result<(), OffchainElectionError> { +pub(crate) fn compute_offchain_election() -> Result<(), OffchainElectionError> { let iters = get_balancing_iters::(); // compute raw solution. Note that we use `OffchainAccuracy`. let ElectionResult { @@ -151,7 +151,7 @@ pub(crate) fn compute_offchain_election() -> Result<(), OffchainElecti /// Get a random number of iterations to run the balancing. /// /// Uses the offchain seed to generate a random number. -pub fn get_balancing_iters() -> usize { +pub fn get_balancing_iters() -> usize { match T::MaxIterations::get() { 0 => 0, max @ _ => { @@ -257,7 +257,7 @@ pub fn maximum_compact_len( /// /// Indeed, the score must be computed **after** this step. If this step reduces the score too much, /// then the solution will be discarded. 
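`assert_ledger_consistent` above now also checks that, besides `total == active + sum(unlocking)`, the active stake is either zero or at least the existential deposit. A standalone sketch of that invariant, with illustrative values and types:

```rust
// Standalone sketch of the ledger consistency invariant checked in the mock:
// total == active + sum(unlocking), and active is 0 or at least the ED.
const EXISTENTIAL_DEPOSIT: u128 = 10;

struct UnlockChunk {
    value: u128,
}

struct Ledger {
    total: u128,
    active: u128,
    unlocking: Vec<UnlockChunk>,
}

fn assert_ledger_consistent(ledger: &Ledger) {
    let real_total: u128 = ledger
        .unlocking
        .iter()
        .fold(ledger.active, |acc, chunk| acc + chunk.value);
    assert_eq!(real_total, ledger.total, "total must equal active + unlocking");
    assert!(
        ledger.active >= EXISTENTIAL_DEPOSIT || ledger.active == 0,
        "active ({}) must be zero or at least the ED ({})",
        ledger.active,
        EXISTENTIAL_DEPOSIT,
    );
}

fn main() {
    let ledger = Ledger {
        total: 1000,
        active: 600,
        unlocking: vec![UnlockChunk { value: 250 }, UnlockChunk { value: 150 }],
    };
    assert_ledger_consistent(&ledger);
}
```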
-pub fn trim_to_weight( +pub fn trim_to_weight( maximum_allowed_voters: u32, mut compact: CompactAssignments, nominator_index: FN, @@ -318,7 +318,7 @@ where /// Takes an election result and spits out some data that can be submitted to the chain. /// /// This does a lot of stuff; read the inline comments. -pub fn prepare_submission( +pub fn prepare_submission( assignments: Vec>, winners: Vec<(T::AccountId, ExtendedBalance)>, do_reduce: bool, diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index af9a92f16a4636def4dcb99687ceabed183ae137..e59f2e84e43237c694cf83bfe35a0fee0379beeb 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -47,10 +47,10 @@ //! has multiple misbehaviors. However, accounting for such cases is necessary //! to deter a class of "rage-quit" attacks. //! -//! Based on research at https://research.web3.foundation/en/latest/polkadot/slashing/npos/ +//! Based on research at use super::{ - EraIndex, Trait, Module, Store, BalanceOf, Exposure, Perbill, SessionInterface, + EraIndex, Config, Module, Store, BalanceOf, Exposure, Perbill, SessionInterface, NegativeImbalanceOf, UnappliedSlash, Error, }; use sp_runtime::{traits::{Zero, Saturating}, RuntimeDebug, DispatchResult}; @@ -190,7 +190,7 @@ impl SpanRecord { /// Parameters for performing a slash. #[derive(Clone)] -pub(crate) struct SlashParams<'a, T: 'a + Trait> { +pub(crate) struct SlashParams<'a, T: 'a + Config> { /// The stash account being slashed. pub(crate) stash: &'a T::AccountId, /// The proportion of the slash. @@ -214,7 +214,7 @@ pub(crate) struct SlashParams<'a, T: 'a + Trait> { /// /// The pending slash record returned does not have initialized reporters. Those have /// to be set at a higher level, if any. -pub(crate) fn compute_slash(params: SlashParams) +pub(crate) fn compute_slash(params: SlashParams) -> Option>> { let SlashParams { @@ -309,7 +309,7 @@ pub(crate) fn compute_slash(params: SlashParams) // doesn't apply any slash, but kicks out the validator if the misbehavior is from // the most recent slashing span. -fn kick_out_if_recent( +fn kick_out_if_recent( params: SlashParams, ) { // these are not updated by era-span or end-span. @@ -338,7 +338,7 @@ fn kick_out_if_recent( /// Slash nominators. Accepts general parameters and the prior slash percentage of the validator. /// /// Returns the amount of reward to pay out. -fn slash_nominators( +fn slash_nominators( params: SlashParams, prior_slash_p: Perbill, nominators_slashed: &mut Vec<(T::AccountId, BalanceOf)>, @@ -418,7 +418,7 @@ fn slash_nominators( // dropping this struct applies any necessary slashes, which can lead to free balance // being 0, and the account being garbage-collected -- a dead account should get no new // metadata. -struct InspectingSpans<'a, T: Trait + 'a> { +struct InspectingSpans<'a, T: Config + 'a> { dirty: bool, window_start: EraIndex, stash: &'a T::AccountId, @@ -430,7 +430,7 @@ struct InspectingSpans<'a, T: Trait + 'a> { } // fetches the slashing spans record for a stash account, initializing it if necessary. 
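The offchain election code above now caps submissions at `T::BlockWeights::get().max_block` and trims voters until the solution fits under that budget. A simplified standalone sketch of the trimming idea, assuming a linear per-voter weight model (the pallet uses its benchmarked `WeightInfo`, and a smarter search than this linear scan):

```rust
// Simplified sketch of trimming an election solution to a weight budget.
// The per-voter weight function is an assumption for illustration; the pallet
// uses its benchmarked `submit_solution` weight.
type Weight = u64;

/// Weight of submitting a solution with `voters` voters (illustrative linear model).
fn solution_weight(voters: u32) -> Weight {
    10_000_000 + 500_000 * voters as Weight
}

/// Largest number of voters whose submission still fits under `max_weight`.
fn maximum_voter_count(mut voters: u32, max_weight: Weight) -> u32 {
    while voters > 0 && solution_weight(voters) > max_weight {
        voters -= 1;
    }
    voters
}

fn main() {
    let max_block: Weight = 2_000_000_000;
    // Everything fits: nothing is trimmed.
    assert_eq!(maximum_voter_count(100, max_block), 100);
    // A tight budget forces voters to be dropped until the weight fits.
    let tight: Weight = 10_000_000 + 500_000 * 10;
    assert_eq!(maximum_voter_count(100, tight), 10);
}
```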
-fn fetch_spans<'a, T: Trait + 'a>( +fn fetch_spans<'a, T: Config + 'a>( stash: &'a T::AccountId, window_start: EraIndex, paid_out: &'a mut BalanceOf, @@ -455,7 +455,7 @@ fn fetch_spans<'a, T: Trait + 'a>( } } -impl<'a, T: 'a + Trait> InspectingSpans<'a, T> { +impl<'a, T: 'a + Config> InspectingSpans<'a, T> { fn span_index(&self) -> SpanIndex { self.spans.span_index } @@ -526,7 +526,7 @@ impl<'a, T: 'a + Trait> InspectingSpans<'a, T> { } } -impl<'a, T: 'a + Trait> Drop for InspectingSpans<'a, T> { +impl<'a, T: 'a + Config> Drop for InspectingSpans<'a, T> { fn drop(&mut self) { // only update on disk if we slashed this account. if !self.dirty { return } @@ -542,13 +542,13 @@ impl<'a, T: 'a + Trait> Drop for InspectingSpans<'a, T> { } /// Clear slashing metadata for an obsolete era. -pub(crate) fn clear_era_metadata(obsolete_era: EraIndex) { +pub(crate) fn clear_era_metadata(obsolete_era: EraIndex) { as Store>::ValidatorSlashInEra::remove_prefix(&obsolete_era); as Store>::NominatorSlashInEra::remove_prefix(&obsolete_era); } /// Clear slashing metadata for a dead account. -pub(crate) fn clear_stash_metadata( +pub(crate) fn clear_stash_metadata( stash: &T::AccountId, num_slashing_spans: u32, ) -> DispatchResult { @@ -576,7 +576,7 @@ pub(crate) fn clear_stash_metadata( // apply the slash to a stash account, deducting any missing funds from the reward // payout, saturating at 0. this is mildly unfair but also an edge-case that // can only occur when overlapping locked funds have been slashed. -pub fn do_slash( +pub fn do_slash( stash: &T::AccountId, value: BalanceOf, reward_payout: &mut BalanceOf, @@ -613,7 +613,7 @@ pub fn do_slash( } /// Apply a previously-unapplied slash. -pub(crate) fn apply_slash(unapplied_slash: UnappliedSlash>) { +pub(crate) fn apply_slash(unapplied_slash: UnappliedSlash>) { let mut slashed_imbalance = NegativeImbalanceOf::::zero(); let mut reward_payout = unapplied_slash.payout; @@ -638,7 +638,7 @@ pub(crate) fn apply_slash(unapplied_slash: UnappliedSlash( +fn pay_reporters( reward_payout: BalanceOf, slashed_imbalance: NegativeImbalanceOf, reporters: &[T::AccountId], diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index 25cfffeac2c14404dd02b66b15666da4f915c189..2f198166d7ee0e2030adfd33105de438d0b33f16 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -29,13 +29,13 @@ use sp_npos_elections::*; const SEED: u32 = 0; /// This function removes all validators and nominators from storage. -pub fn clear_validators_and_nominators() { +pub fn clear_validators_and_nominators() { Validators::::remove_all(); Nominators::::remove_all(); } /// Grab a funded user. -pub fn create_funded_user( +pub fn create_funded_user( string: &'static str, n: u32, balance_factor: u32, @@ -49,7 +49,7 @@ pub fn create_funded_user( } /// Create a stash and controller pair. -pub fn create_stash_controller( +pub fn create_stash_controller( n: u32, balance_factor: u32, destination: RewardDestination, @@ -66,7 +66,7 @@ pub fn create_stash_controller( /// Create a stash and controller pair, where the controller is dead, and payouts go to controller. /// This is used to test worst case payout scenarios. -pub fn create_stash_and_dead_controller( +pub fn create_stash_and_dead_controller( n: u32, balance_factor: u32, destination: RewardDestination, @@ -83,7 +83,7 @@ pub fn create_stash_and_dead_controller( } /// create `max` validators. 
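The comment above `do_slash` says the slash is applied to the stash, with any shortfall deducted from the reward payout, saturating at zero. A rough sketch of that bookkeeping with plain `u128` balances; the real code goes through `Currency::slash` and imbalances rather than raw integers:

```rust
// Sketch of the saturating bookkeeping described above do_slash: if the stash
// cannot cover the full slash, the shortfall is deducted from the reward
// payout, saturating at zero. Balances are plain u128 for illustration.
fn do_slash(free_balance: u128, value: u128, reward_payout: u128) -> (u128, u128, u128) {
    // How much can actually be slashed from the stash.
    let slashed = value.min(free_balance);
    let missing = value - slashed;
    // Any shortfall reduces the reward payout, but never below zero.
    let reward_payout = reward_payout.saturating_sub(missing);
    let new_free = free_balance - slashed;
    (new_free, slashed, reward_payout)
}

fn main() {
    // Stash covers the slash fully: payout untouched.
    assert_eq!(do_slash(1_000, 300, 50), (700, 300, 50));
    // Stash only covers part of it: the missing 200 comes out of the payout.
    assert_eq!(do_slash(100, 300, 50), (0, 100, 0));
}
```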
-pub fn create_validators( +pub fn create_validators( max: u32, balance_factor: u32, ) -> Result::Source>, &'static str> { @@ -115,7 +115,7 @@ pub fn create_validators( /// Else, all of them are considered and `edge_per_nominator` random validators are voted for. /// /// Return the validators choosen to be nominated. -pub fn create_validators_with_nominators_for_era( +pub fn create_validators_with_nominators_for_era( validators: u32, nominators: u32, edge_per_nominator: usize, @@ -173,7 +173,7 @@ pub fn create_validators_with_nominators_for_era( /// Build a _really bad_ but acceptable solution for election. This should always yield a solution /// which has a less score than the seq-phragmen. -pub fn get_weak_solution( +pub fn get_weak_solution( do_reduce: bool, ) -> (Vec, CompactAssignments, ElectionScore, ElectionSize) { let mut backing_stake_of: BTreeMap> = BTreeMap::new(); @@ -282,7 +282,7 @@ pub fn get_weak_solution( /// Create a solution for seq-phragmen. This uses the same internal function as used by the offchain /// worker code. -pub fn get_seq_phragmen_solution( +pub fn get_seq_phragmen_solution( do_reduce: bool, ) -> ( Vec, @@ -301,13 +301,13 @@ pub fn get_seq_phragmen_solution( assignments, winners, do_reduce, - T::MaximumBlockWeight::get(), + T::BlockWeights::get().max_block, ) .unwrap() } /// Returns a solution in which only one winner is elected with just a self vote. -pub fn get_single_winner_solution( +pub fn get_single_winner_solution( winner: T::AccountId, ) -> Result< ( @@ -352,7 +352,7 @@ pub fn get_single_winner_solution( } /// get the active era. -pub fn current_era() -> EraIndex { +pub fn current_era() -> EraIndex { >::current_era().unwrap_or(0) } @@ -366,7 +366,7 @@ pub fn init_active_era() { /// Create random assignments for the given list of winners. Each assignment will have /// MAX_NOMINATIONS edges. 
-pub fn create_assignments_for_offchain( +pub fn create_assignments_for_offchain( num_assignments: u32, winners: Vec<::Source>, ) -> Result< diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 2a02d87aa2c578ae5cd61905f39dd3dad93e1091..79edc012cd3f2db19f0061eaaa04f596b09bc46a 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -3267,7 +3267,7 @@ mod offchain_election { ElectionSize::default(), ), Error::::OffchainElectionEarlySubmission, - Some(::DbWeight::get().reads(1)), + Some(::DbWeight::get().reads(1)), ); }) } @@ -3303,7 +3303,7 @@ mod offchain_election { score, ), Error::::OffchainElectionWeakSubmission, - Some(::DbWeight::get().reads(3)) + Some(::DbWeight::get().reads(3)) ); }) } @@ -4340,7 +4340,7 @@ fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward( // then the nominator can't claim its reward // * A nominator can't claim another nominator reward ExtBuilder::default().build_and_execute(|| { - for i in 0..=::MaxNominatorRewardedPerValidator::get() { + for i in 0..=::MaxNominatorRewardedPerValidator::get() { let stash = 10_000 + i as AccountId; let controller = 20_000 + i as AccountId; let balance = 10_000 + i as Balance; @@ -4366,7 +4366,7 @@ fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward( mock::make_all_reward_payment(1); // Assert only nominators from 1 to Max are rewarded - for i in 0..=::MaxNominatorRewardedPerValidator::get() { + for i in 0..=::MaxNominatorRewardedPerValidator::get() { let stash = 10_000 + i as AccountId; let balance = 10_000 + i as Balance; if stash == 10_000 { @@ -4569,14 +4569,14 @@ fn bond_during_era_correctly_populates_claimed_rewards() { fn offences_weight_calculated_correctly() { ExtBuilder::default().nominate(true).build_and_execute(|| { // On offence with zero offenders: 4 Reads, 1 Write - let zero_offence_weight = ::DbWeight::get().reads_writes(4, 1); + let zero_offence_weight = ::DbWeight::get().reads_writes(4, 1); assert_eq!(Staking::on_offence(&[], &[Perbill::from_percent(50)], 0), Ok(zero_offence_weight)); // On Offence with N offenders, Unapplied: 4 Reads, 1 Write + 4 Reads, 5 Writes - let n_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) - + ::DbWeight::get().reads_writes(4, 5); + let n_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) + + ::DbWeight::get().reads_writes(4, 5); - let offenders: Vec::AccountId, pallet_session::historical::IdentificationTuple>> + let offenders: Vec::AccountId, pallet_session::historical::IdentificationTuple>> = (1..10).map(|i| OffenceDetails { offender: (i, Staking::eras_stakers(Staking::active_era().unwrap().index, i)), @@ -4595,14 +4595,14 @@ fn offences_weight_calculated_correctly() { let n = 1; // Number of offenders let rw = 3 + 3 * n; // rw reads and writes - let one_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) - + ::DbWeight::get().reads_writes(rw, rw) + let one_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) + + ::DbWeight::get().reads_writes(rw, rw) // One `slash_cost` - + ::DbWeight::get().reads_writes(6, 5) + + ::DbWeight::get().reads_writes(6, 5) // `slash_cost` * nominators (1) - + ::DbWeight::get().reads_writes(6, 5) + + ::DbWeight::get().reads_writes(6, 5) // `reward_cost` * reporters (1) - + ::DbWeight::get().reads_writes(2, 2); + + ::DbWeight::get().reads_writes(2, 2); assert_eq!(Staking::on_offence(&one_offender, &[Perbill::from_percent(50)], 0), Ok(one_offence_unapplied_weight)); }); @@ -4614,7 +4614,7 @@ fn 
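The weight assertions in the tests above are built from `DbWeight::reads_writes`, which is simply `reads * read_cost + writes * write_cost`. A tiny standalone sketch of that arithmetic with illustrative per-operation costs:

```rust
// Minimal sketch of RuntimeDbWeight-style accounting used by the test
// assertions above; the per-operation costs here are illustrative.
type Weight = u64;

#[derive(Clone, Copy)]
struct DbWeight {
    read: Weight,
    write: Weight,
}

impl DbWeight {
    fn reads(&self, n: Weight) -> Weight {
        self.read * n
    }
    fn writes(&self, n: Weight) -> Weight {
        self.write * n
    }
    fn reads_writes(&self, r: Weight, w: Weight) -> Weight {
        self.reads(r) + self.writes(w)
    }
}

fn main() {
    let db = DbWeight { read: 25_000_000, write: 100_000_000 };
    // "On offence with zero offenders: 4 reads, 1 write" from the test above.
    let zero_offence_weight = db.reads_writes(4, 1);
    assert_eq!(zero_offence_weight, 4 * 25_000_000 + 100_000_000);
}
```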
on_initialize_weight_is_correct() { assert_eq!(Validators::::iter().count(), 0); assert_eq!(Nominators::::iter().count(), 0); // When this pallet has nothing, we do 4 reads each block - let base_weight = ::DbWeight::get().reads(4); + let base_weight = ::DbWeight::get().reads(4); assert_eq!(base_weight, Staking::on_initialize(0)); }); @@ -4636,7 +4636,7 @@ fn on_initialize_weight_is_correct() { // With 4 validators and 5 nominator, we should increase weight by: // - (4 + 5) reads // - 3 Writes - let final_weight = ::DbWeight::get().reads_writes(4 + 9, 3); + let final_weight = ::DbWeight::get().reads_writes(4 + 9, 3); assert_eq!(final_weight, Staking::on_initialize(System::block_number())); }); } @@ -4696,3 +4696,85 @@ fn payout_to_any_account_works() { assert!(Balances::free_balance(42) > 0); }) } + +#[test] +fn cannot_bond_extra_to_lower_than_ed() { + ExtBuilder::default() + .existential_deposit(10) + .build_and_execute(|| { + // stash must have more balance than bonded for this to work. + assert_eq!(Balances::free_balance(&21), 512_000); + + // initial stuff. + assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![] + } + ); + + // unbond all of it. + assert_ok!(Staking::unbond(Origin::signed(20), 1000)); + assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: 1000, + active: 0, + unlocking: vec![UnlockChunk { value: 1000, era: 3 }], + claimed_rewards: vec![] + } + ); + + // now bond a wee bit more + assert_noop!( + Staking::bond_extra(Origin::signed(21), 5), + Error::::InsufficientValue, + ); + }) +} + +#[test] +fn cannot_rebond_to_lower_than_ed() { + ExtBuilder::default() + .existential_deposit(10) + .build_and_execute(|| { + // stash must have more balance than bonded for this to work. + assert_eq!(Balances::free_balance(&21), 512_000); + + // initial stuff. + assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![] + } + ); + + // unbond all of it. + assert_ok!(Staking::unbond(Origin::signed(20), 1000)); + assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: 1000, + active: 0, + unlocking: vec![UnlockChunk { value: 1000, era: 3 }], + claimed_rewards: vec![] + } + ); + + // now bond a wee bit more + assert_noop!( + Staking::rebond(Origin::signed(20), 5), + Error::::InsufficientValue, + ); + }) +} diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index cb301276e0f0559e3d6d968e015bea637bedcc3e..2e715c53356fce5893e74ed081422b7e2b924af0 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -72,7 +72,7 @@ pub trait WeightInfo { /// Weights for pallet_staking using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn bond() -> Weight { (99_659_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) diff --git a/frame/sudo/README.md b/frame/sudo/README.md index 233727ac1bd28b819d0b4b724e239ba3d6e8a9a2..95ca7ce88d972ec43659a43cd9e7353f6448b223 100644 --- a/frame/sudo/README.md +++ b/frame/sudo/README.md @@ -38,10 +38,10 @@ This is an example of a module that exposes a privileged function: use frame_support::{decl_module, dispatch}; use frame_system::ensure_root; -pub trait Trait: frame_system::Trait {} +pub trait Config: frame_system::Config {} decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn privileged_function(origin) -> dispatch::DispatchResult { ensure_root(origin)?; @@ -64,7 +64,7 @@ You need to set an initial superuser account as the sudo `key`. * [Democracy](https://docs.rs/pallet-democracy/latest/pallet_democracy/) [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html [`Origin`]: https://docs.substrate.dev/docs/substrate-types -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 0d21e4432666825a7fc99e665ad47fdf073b6c52..e8a13c8b00f07ce16d9524de5d62f5f6de8e78d9 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -17,7 +17,7 @@ //! # Sudo Module //! -//! - [`sudo::Trait`](./trait.Trait.html) +//! - [`sudo::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -55,10 +55,10 @@ //! use frame_support::{decl_module, dispatch}; //! use frame_system::ensure_root; //! -//! pub trait Trait: frame_system::Trait {} +//! pub trait Config: frame_system::Config {} //! //! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 0] //! pub fn privileged_function(origin) -> dispatch::DispatchResult { //! ensure_root(origin)?; @@ -82,7 +82,7 @@ //! * [Democracy](../pallet_democracy/index.html) //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! [`Config`]: ./trait.Config.html //! [`Origin`]: https://docs.substrate.dev/docs/substrate-types #![cfg_attr(not(feature = "std"), no_std)] @@ -105,9 +105,9 @@ mod mock; #[cfg(test)] mod tests; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// A sudo-able call. type Call: Parameter + UnfilteredDispatchable + GetDispatchInfo; @@ -115,7 +115,7 @@ pub trait Trait: frame_system::Trait { decl_module! { /// Sudo module declaration. - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; @@ -131,7 +131,7 @@ decl_module! { /// - Weight of derivative `call` execution + 10,000. /// # #[weight = (call.get_dispatch_info().weight + 10_000, call.get_dispatch_info().class)] - fn sudo(origin, call: Box<::Call>) -> DispatchResultWithPostInfo { + fn sudo(origin, call: Box<::Call>) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); @@ -153,7 +153,7 @@ decl_module! { /// - The weight of this call is defined by the caller. /// # #[weight = (*_weight, call.get_dispatch_info().class)] - fn sudo_unchecked_weight(origin, call: Box<::Call>, _weight: Weight) -> DispatchResultWithPostInfo { + fn sudo_unchecked_weight(origin, call: Box<::Call>, _weight: Weight) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); @@ -206,7 +206,7 @@ decl_module! 
{ )] fn sudo_as(origin, who: ::Source, - call: Box<::Call> + call: Box<::Call> ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; @@ -214,15 +214,9 @@ decl_module! { let who = T::Lookup::lookup(who)?; - let res = match call.dispatch_bypass_filter(frame_system::RawOrigin::Signed(who).into()) { - Ok(_) => true, - Err(e) => { - sp_runtime::print(e); - false - } - }; + let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Signed(who).into()); - Self::deposit_event(RawEvent::SudoAsDone(res)); + Self::deposit_event(RawEvent::SudoAsDone(res.map(|_| ()).map_err(|e| e.error))); // Sudo user does not pay a fee. Ok(Pays::No.into()) } @@ -230,18 +224,18 @@ decl_module! { } decl_event!( - pub enum Event where AccountId = ::AccountId { + pub enum Event where AccountId = ::AccountId { /// A sudo just took place. \[result\] Sudid(DispatchResult), /// The \[sudoer\] just switched identity; the old key is supplied. KeyChanged(AccountId), /// A sudo just took place. \[result\] - SudoAsDone(bool), + SudoAsDone(DispatchResult), } ); decl_storage! { - trait Store for Module as Sudo { + trait Store for Module as Sudo { /// The `AccountId` of the sudo key. Key get(fn key) config(): T::AccountId; } @@ -249,7 +243,7 @@ decl_storage! { decl_error! { /// Error for the Sudo module - pub enum Error for Module { + pub enum Error for Module { /// Sender must be the Sudo account RequireSudo, } diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index 7996cd05d071faee321cff2bd001bbff8350ebed..12707d3e9da67e9c1ea6c66b21ed6f461ce5915d 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -23,36 +23,37 @@ use frame_support::{ weights::Weight, }; use sp_core::H256; -use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; +use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; use sp_io; use crate as sudo; use frame_support::traits::Filter; +use frame_system::limits; // Logger module to track execution. pub mod logger { use super::*; use frame_system::ensure_root; - pub trait Trait: frame_system::Trait { - type Event: From> + Into<::Event>; + pub trait Config: frame_system::Config { + type Event: From> + Into<::Event>; } decl_storage! { - trait Store for Module as Logger { + trait Store for Module as Logger { AccountLog get(fn account_log): Vec; I32Log get(fn i32_log): Vec; } } decl_event! { - pub enum Event where AccountId = ::AccountId { + pub enum Event where AccountId = ::AccountId { AppendI32(i32, Weight), AppendI32AndAccount(AccountId, i32, Weight), } } decl_module! { - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { fn deposit_event() = default; #[weight = *weight] @@ -106,9 +107,7 @@ pub struct Test; parameter_types! 
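The `sudo_as` hunk above stops collapsing the dispatch result into a `bool` and instead emits `SudoAsDone` with the plain `DispatchResult`, via `res.map(|_| ()).map_err(|e| e.error)`. A standalone sketch of that mapping with simplified stand-in types (not the real `frame_support` definitions):

```rust
// Sketch of collapsing a rich dispatch result into the plain result stored in
// the SudoAsDone event. The types are simplified stand-ins for
// DispatchResultWithPostInfo / DispatchError.
#[derive(Debug, PartialEq)]
enum DispatchError {
    BadOrigin,
}

/// Stand-in for the extra post-dispatch information carried alongside the result.
#[derive(Debug)]
struct PostDispatchInfo;

#[derive(Debug)]
struct DispatchErrorWithPostInfo {
    #[allow(dead_code)]
    post_info: PostDispatchInfo,
    error: DispatchError,
}

type DispatchResultWithPostInfo = Result<PostDispatchInfo, DispatchErrorWithPostInfo>;
type DispatchResult = Result<(), DispatchError>;

/// Drop the post-dispatch info and keep only success/failure plus the error.
fn to_event_result(res: DispatchResultWithPostInfo) -> DispatchResult {
    res.map(|_| ()).map_err(|e| e.error)
}

fn main() {
    let ok: DispatchResultWithPostInfo = Ok(PostDispatchInfo);
    assert_eq!(to_event_result(ok), Ok(()));

    let err: DispatchResultWithPostInfo = Err(DispatchErrorWithPostInfo {
        post_info: PostDispatchInfo,
        error: DispatchError::BadOrigin,
    });
    assert_eq!(to_event_result(err), Err(DispatchError::BadOrigin));
}
```

Keeping the error in the event makes a failed `sudo_as` observable to clients without having to inspect the inner call's execution.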
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: limits::BlockWeights = limits::BlockWeights::simple_max(1024); } pub struct BlockEverything; @@ -118,8 +117,11 @@ impl Filter for BlockEverything { } } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = BlockEverything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = Call; type Index = u64; @@ -131,13 +133,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = TestEvent; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = (); @@ -146,13 +141,13 @@ impl frame_system::Trait for Test { type SystemWeightInfo = (); } -// Implement the logger module's `Trait` on the Test runtime. -impl logger::Trait for Test { +// Implement the logger module's `Config` on the Test runtime. +impl logger::Config for Test { type Event = TestEvent; } -// Implement the sudo module's `Trait` on the Test runtime. -impl Trait for Test { +// Implement the sudo module's `Config` on the Test runtime. +impl Config for Test { type Event = TestEvent; type Call = Call; } diff --git a/frame/sudo/src/tests.rs b/frame/sudo/src/tests.rs index cba1e1cf605404fb752ef1bcf0a207b13b2991b3..03ce100c3a40a87af8eae493e2a4d70d5487e69c 100644 --- a/frame/sudo/src/tests.rs +++ b/frame/sudo/src/tests.rs @@ -163,7 +163,7 @@ fn sudo_as_emits_events_correctly() { // A non-privileged function will work when passed to `sudo_as` with the root `key`. let call = Box::new(Call::Logger(LoggerCall::non_privileged_log(42, 1))); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); - let expected_event = TestEvent::sudo(RawEvent::SudoAsDone(true)); + let expected_event = TestEvent::sudo(RawEvent::SudoAsDone(Ok(()))); assert!(System::events().iter().any(|a| a.event == expected_event)); }); } diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 1f7fe9a2025384d39f28c0bb45027732d103f593..0189dc172fb65b5729c3f92e4900ab7b062de089 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -35,7 +35,7 @@ smallvec = "1.4.1" [dev-dependencies] pretty_assertions = "0.6.1" frame-system = { version = "2.0.0", path = "../system" } -parity-util-mem = { version = "0.7.0", features = ["primitive-types"] } +parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } sp-api = { version = "2.0.0", default-features = false, path = "../../primitives/api" } diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index a6fb58846cbad319545f8a98e98335a3599a46c1..8d3d1ce590040a02aafffc157f9c1ef8fc4c9e8f 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -35,7 +35,7 @@ use proc_macro::TokenStream; /// /// ```nocompile /// decl_storage! 
{ -/// trait Store for Module as Example { +/// trait Store for Module as Example { /// Foo get(fn foo) config(): u32=12; /// Bar: map hasher(identity) u32 => u32; /// pub Zed build(|config| vec![(0, 0)]): map hasher(identity) u32 => u32; @@ -43,7 +43,7 @@ use proc_macro::TokenStream; /// } /// ``` /// -/// Declaration is set with the header `(pub) trait Store for Module as Example`, +/// Declaration is set with the header `(pub) trait Store for Module as Example`, /// with `Store` a (pub) trait generated associating each storage item to the `Module` and /// `as Example` setting the prefix used for storage items of this module. `Example` must be unique: /// another module with the same name and the same inner storage item name will conflict. @@ -169,7 +169,7 @@ use proc_macro::TokenStream; /// /// ```nocompile /// decl_storage! { -/// trait Store for Module as Example { +/// trait Store for Module as Example { /// /// // Your storage items /// } @@ -202,7 +202,7 @@ use proc_macro::TokenStream; /// (`DefaultInstance` type is optional): /// /// ```nocompile -/// trait Store for Module, I: Instance=DefaultInstance> as Example {} +/// trait Store for Module, I: Instance=DefaultInstance> as Example {} /// ``` /// /// Accessing the structure no requires the instance as generic parameter: @@ -214,7 +214,7 @@ use proc_macro::TokenStream; /// This macro supports a where clause which will be replicated to all generated types. /// /// ```nocompile -/// trait Store for Module as Example where T::AccountId: std::fmt::Display {} +/// trait Store for Module as Example where T::AccountId: std::fmt::Display {} /// ``` /// /// ## Limitations diff --git a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs index 6339134ea0d22681ac413b60613b66080dec10bd..93543075a3d2b61880356905419760b10a632ebf 100644 --- a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs +++ b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs @@ -33,11 +33,11 @@ pub struct GenesisConfigFieldDef { pub struct GenesisConfigDef { pub is_generic: bool, pub fields: Vec, - /// For example: `, I: Instance=DefaultInstance>`. + /// For example: `, I: Instance=DefaultInstance>`. pub genesis_struct_decl: TokenStream, /// For example: ``. pub genesis_struct: TokenStream, - /// For example: `, I: Instance>`. + /// For example: `, I: Instance>`. pub genesis_impl: TokenStream, /// The where clause to use to constrain generics if genesis config is generic. pub genesis_where_clause: Option, diff --git a/frame/support/procedural/src/storage/mod.rs b/frame/support/procedural/src/storage/mod.rs index 0aa0a3cad7cd1e1ec8b55f57781ffa2bf7265a51..bc23dad74bcd5b2379b59f0c1e130ac2df9b4db5 100644 --- a/frame/support/procedural/src/storage/mod.rs +++ b/frame/support/procedural/src/storage/mod.rs @@ -42,7 +42,7 @@ pub struct DeclStorageDef { module_name: syn::Ident, /// Usually `T`. module_runtime_generic: syn::Ident, - /// Usually `Trait` + /// Usually `Config` module_runtime_trait: syn::Path, /// For instantiable module: usually `I: Instance=DefaultInstance`. module_instance: Option, @@ -77,7 +77,7 @@ pub struct DeclStorageDefExt { module_name: syn::Ident, /// Usually `T`. module_runtime_generic: syn::Ident, - /// Usually `Trait`. + /// Usually `Config`. module_runtime_trait: syn::Path, /// For instantiable module: usually `I: Instance=DefaultInstance`. 
module_instance: Option, @@ -93,7 +93,7 @@ pub struct DeclStorageDefExt { crate_name: syn::Ident, /// Full struct expansion: `Module`. module_struct: proc_macro2::TokenStream, - /// Impl block for module: ``. + /// Impl block for module: ``. module_impl: proc_macro2::TokenStream, /// For instantiable: `I`. optional_instance: Option, @@ -212,7 +212,7 @@ pub struct StorageLineDefExt { storage_struct: proc_macro2::TokenStream, /// If storage is generic over runtime then `T`. optional_storage_runtime_comma: Option, - /// If storage is generic over runtime then `T: Trait`. + /// If storage is generic over runtime then `T: Config`. optional_storage_runtime_bound_comma: Option, /// The where clause to use to constrain generics if storage is generic over runtime. optional_storage_where_clause: Option, diff --git a/frame/support/procedural/tools/Cargo.toml b/frame/support/procedural/tools/Cargo.toml index 2cff2473b85d4415386e137695f7fc73a9ba8347..b9a9cc7adb0d5ffea249adea16f517b8d8f7beb9 100644 --- a/frame/support/procedural/tools/Cargo.toml +++ b/frame/support/procedural/tools/Cargo.toml @@ -16,4 +16,4 @@ frame-support-procedural-tools-derive = { version = "2.0.0", path = "./derive" } proc-macro2 = "1.0.6" quote = "1.0.3" syn = { version = "1.0.7", features = ["full", "visit"] } -proc-macro-crate = "0.1.4" +proc-macro-crate = "0.1.5" diff --git a/frame/support/src/debug.rs b/frame/support/src/debug.rs index 04f5c529f0aff0d56832fd4cf5f4b36c1b46e282..54a1e9c3a03781d76901f23d7ffe6515ce19391a 100644 --- a/frame/support/src/debug.rs +++ b/frame/support/src/debug.rs @@ -134,7 +134,7 @@ macro_rules! runtime_print { use core::fmt::Write; let mut w = $crate::sp_std::Writer::default(); let _ = core::write!(&mut w, $($arg)+); - sp_io::misc::print_utf8(&w.inner()) + $crate::sp_io::misc::print_utf8(&w.inner()) } } } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index d55faa28d115bbddc2ada02fe736cc809af21fc7..2477f9421ffec4aabd210f4f8cec81324daa07b9 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -72,9 +72,9 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; -/// # use frame_system::{Trait, ensure_signed}; +/// # use frame_system::{Config, ensure_signed}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// /// // Private functions are dispatchable, but not available to other /// // FRAME pallets. @@ -98,7 +98,7 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// /// The declaration is set with the header where: /// -/// * `Module`: The struct generated by the macro, with type `Trait`. +/// * `Module`: The struct generated by the macro, with type `Config`. /// * `Call`: The enum generated for every pallet, which implements [`Callable`](./dispatch/trait.Callable.html). /// * `origin`: Alias of `T::Origin`, declared by the [`impl_outer_origin!`](./macro.impl_outer_origin.html) macro. /// * `Result`: The expected return type from pallet functions. @@ -114,9 +114,9 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; -/// # use frame_system::{Trait, ensure_signed}; +/// # use frame_system::{Config, ensure_signed}; /// decl_module! 
{ -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] /// fn my_long_function(origin) -> dispatch::DispatchResult { /// // Your implementation @@ -149,9 +149,9 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}; -/// # use frame_system::{Trait, ensure_signed}; +/// # use frame_system::{Config, ensure_signed}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 1_000_000] /// fn my_long_function(origin, do_expensive_calc: bool) -> DispatchResultWithPostInfo { /// ensure_signed(origin).map_err(|e| e.with_weight(100_000))?; @@ -178,9 +178,9 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::transactional; -/// # use frame_system::Trait; +/// # use frame_system::Config; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] /// #[transactional] /// fn my_short_function(origin) { @@ -199,9 +199,9 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; -/// # use frame_system::{Trait, ensure_signed, ensure_root}; +/// # use frame_system::{Config, ensure_signed, ensure_root}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] /// fn my_privileged_function(origin) -> dispatch::DispatchResult { /// ensure_root(origin)?; @@ -236,10 +236,10 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # pub struct DefaultInstance; /// # pub trait Instance: 'static {} /// # impl Instance for DefaultInstance {} -/// pub trait Trait: frame_system::Trait {} +/// pub trait Config: frame_system::Config {} /// /// decl_module! { -/// pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { +/// pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { /// // Your implementation /// } /// } @@ -261,10 +261,10 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # extern crate frame_support; /// # use frame_support::dispatch; /// # use frame_system::{self as system, ensure_signed}; -/// pub trait Trait: system::Trait where Self::AccountId: From {} +/// pub trait Config: system::Config where Self::AccountId: From {} /// /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin, T::AccountId: From { +/// pub struct Module for enum Call where origin: T::Origin, T::AccountId: From { /// // Your implementation /// } /// } @@ -1272,11 +1272,11 @@ macro_rules! 
decl_module { { $( $other_where_bounds:tt )* } fn on_initialize() -> $return:ty { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnInitialize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnInitialize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { - fn on_initialize(_block_number_not_used: <$trait_instance as $system::Trait>::BlockNumber) -> $return { + fn on_initialize(_block_number_not_used: <$trait_instance as $system::Config>::BlockNumber) -> $return { $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_initialize")); { $( $impl )* } } @@ -1289,8 +1289,8 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } fn on_initialize($param:ident : $param_ty:ty) -> $return:ty { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnInitialize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnInitialize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn on_initialize($param: $param_ty) -> $return { @@ -1305,8 +1305,8 @@ macro_rules! decl_module { $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnInitialize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnInitialize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* {} }; @@ -1326,10 +1326,10 @@ macro_rules! decl_module { let result: $return = (|| { $( $impl )* })(); $crate::crate_to_pallet_version!() - .put_into_storage::<<$trait_instance as $system::Trait>::PalletInfo, Self>(); + .put_into_storage::<<$trait_instance as $system::Config>::PalletInfo, Self>(); let additional_write = < - <$trait_instance as $system::Trait>::DbWeight as $crate::traits::Get<_> + <$trait_instance as $system::Config>::DbWeight as $crate::traits::Get<_> >::get().writes(1); result.saturating_add(additional_write) @@ -1350,10 +1350,10 @@ macro_rules! decl_module { $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_runtime_upgrade")); $crate::crate_to_pallet_version!() - .put_into_storage::<<$trait_instance as $system::Trait>::PalletInfo, Self>(); + .put_into_storage::<<$trait_instance as $system::Config>::PalletInfo, Self>(); < - <$trait_instance as $system::Trait>::DbWeight as $crate::traits::Get<_> + <$trait_instance as $system::Config>::DbWeight as $crate::traits::Get<_> >::get().writes(1) } } @@ -1394,11 +1394,11 @@ macro_rules! 
decl_module { { $( $other_where_bounds:tt )* } fn on_finalize() { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnFinalize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnFinalize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { - fn on_finalize(_block_number_not_used: <$trait_instance as $system::Trait>::BlockNumber) { + fn on_finalize(_block_number_not_used: <$trait_instance as $system::Config>::BlockNumber) { $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_finalize")); { $( $impl )* } } @@ -1411,8 +1411,8 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } fn on_finalize($param:ident : $param_ty:ty) { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnFinalize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnFinalize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn on_finalize($param: $param_ty) { @@ -1427,8 +1427,8 @@ macro_rules! decl_module { $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnFinalize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnFinalize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { } @@ -1440,11 +1440,11 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } fn offchain_worker() { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OffchainWorker<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OffchainWorker<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { - fn offchain_worker(_block_number_not_used: <$trait_instance as $system::Trait>::BlockNumber) { $( $impl )* } + fn offchain_worker(_block_number_not_used: <$trait_instance as $system::Config>::BlockNumber) { $( $impl )* } } }; @@ -1454,8 +1454,8 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } fn offchain_worker($param:ident : $param_ty:ty) { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OffchainWorker<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OffchainWorker<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn offchain_worker($param: $param_ty) { $( $impl )* } @@ -1467,8 +1467,8 @@ macro_rules! 
decl_module { $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OffchainWorker<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OffchainWorker<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* {} }; @@ -1824,7 +1824,7 @@ macro_rules! decl_module { fn storage_version() -> Option<$crate::traits::PalletVersion> { let key = $crate::traits::PalletVersion::storage_key::< - <$trait_instance as $system::Trait>::PalletInfo, Self + <$trait_instance as $system::Config>::PalletInfo, Self >().expect("Every active pallet has a name in the runtime; qed"); $crate::storage::unhashed::get(&key) @@ -1837,7 +1837,7 @@ macro_rules! decl_module { { fn on_genesis() { $crate::crate_to_pallet_version!() - .put_into_storage::<<$trait_instance as $system::Trait>::PalletInfo, Self>(); + .put_into_storage::<<$trait_instance as $system::Config>::PalletInfo, Self>(); } } @@ -2019,7 +2019,7 @@ macro_rules! impl_outer_dispatch { } impl $crate::dispatch::Dispatchable for $call_type { type Origin = $origin; - type Trait = $call_type; + type Config = $call_type; type Info = $crate::weights::DispatchInfo; type PostInfo = $crate::weights::PostDispatchInfo; fn dispatch( @@ -2412,12 +2412,12 @@ mod tests { IntegrityTest, Get, }; - pub trait Trait: system::Trait + Sized where Self::AccountId: From { } + pub trait Config: system::Config + Sized where Self::AccountId: From { } pub mod system { use super::*; - pub trait Trait: 'static { + pub trait Config: 'static { type AccountId; type Call; type BaseCallFilter; @@ -2443,11 +2443,11 @@ mod tests { } } - pub type Origin = RawOrigin<::AccountId>; + pub type Origin = RawOrigin<::AccountId>; } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system = system, T::AccountId: From { + pub struct Module for enum Call where origin: T::Origin, system = system, T::AccountId: From { /// Hi, this is a comment. #[weight = 0] fn aux_0(_origin) -> DispatchResult { unreachable!() } @@ -2548,7 +2548,7 @@ mod tests { ]; pub struct TraitImpl {} - impl Trait for TraitImpl { } + impl Config for TraitImpl { } type Test = Module; @@ -2562,7 +2562,7 @@ mod tests { } } - impl system::Trait for TraitImpl { + impl system::Config for TraitImpl { type Origin = OuterOrigin; type AccountId = u32; type Call = OuterCall; diff --git a/frame/support/src/error.rs b/frame/support/src/error.rs index c0a886907d0b634cb4a64711aeaf5edc5f1a4444..0e3f66f9f3c952ac1a9767ea2491f71c4f3e65ce 100644 --- a/frame/support/src/error.rs +++ b/frame/support/src/error.rs @@ -39,7 +39,7 @@ pub use frame_metadata::{ModuleErrorMetadata, ErrorMetadata, DecodeDifferent}; /// # /// decl_error! { /// /// Errors that can occur in my module. -/// pub enum MyError for Module { +/// pub enum MyError for Module { /// /// Hey this is an error message that indicates bla. /// MyCoolErrorMessage, /// /// You are just not cool enough for my module! @@ -47,13 +47,13 @@ pub use frame_metadata::{ModuleErrorMetadata, ErrorMetadata, DecodeDifferent}; /// } /// } /// -/// # use frame_system::Trait; +/// # use frame_system::Config; /// /// // You need to register the error type in `decl_module!` as well to make the error /// // exported in the metadata. /// /// decl_module! 
{ -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// type Error = MyError; /// /// #[weight = 0] diff --git a/frame/support/src/event.rs b/frame/support/src/event.rs index 3538748c30faf7b9eadae95c3c91c1a12e647f24..3cb91e4a3e31bec067e80ea2449428ac9f175679 100644 --- a/frame/support/src/event.rs +++ b/frame/support/src/event.rs @@ -37,7 +37,7 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// # Generic Event Example: /// /// ```rust -/// trait Trait { +/// trait Config { /// type Balance; /// type Token; /// } @@ -45,7 +45,7 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// mod event1 { /// // Event that specifies the generic parameter explicitly (`Balance`). /// frame_support::decl_event!( -/// pub enum Event where Balance = ::Balance { +/// pub enum Event where Balance = ::Balance { /// Message(Balance), /// } /// ); @@ -56,7 +56,7 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// // If no name for the generic parameter is specified explicitly, /// // the name will be taken from the type name of the trait. /// frame_support::decl_event!( -/// pub enum Event where ::Balance { +/// pub enum Event where ::Balance { /// Message(Balance), /// } /// ); @@ -65,7 +65,7 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// mod event3 { /// // And we even support declaring multiple generic parameters! /// frame_support::decl_event!( -/// pub enum Event where ::Balance, ::Token { +/// pub enum Event where ::Balance, ::Token { /// Message(Balance, Token), /// } /// ); @@ -82,7 +82,7 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE ///# struct DefaultInstance; ///# trait Instance {} ///# impl Instance for DefaultInstance {} -/// trait Trait { +/// trait Config { /// type Balance; /// type Token; /// } @@ -90,8 +90,8 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// // For module with instances, DefaultInstance is optional /// frame_support::decl_event!( /// pub enum Event where -/// ::Balance, -/// ::Token +/// ::Balance, +/// ::Token /// { /// Message(Balance, Token), /// } @@ -258,10 +258,10 @@ macro_rules! __decl_generic_event { { $( $events:tt )* }; { ,$( $generic_param:ident = $generic_type:ty ),* }; ) => { - /// [`RawEvent`] specialized for the configuration [`Trait`] + /// [`RawEvent`] specialized for the configuration [`Config`] /// /// [`RawEvent`]: enum.RawEvent.html - /// [`Trait`]: trait.Trait.html + /// [`Config`]: trait.Config.html pub type Event<$event_generic_param $(, $instance $( = $event_default_instance)? )?> = RawEvent<$( $generic_type ),* $(, $instance)? >; #[derive( @@ -551,7 +551,7 @@ mod tests { use codec::{Encode, Decode}; mod system { - pub trait Trait: 'static { + pub trait Config: 'static { type Origin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; @@ -559,7 +559,7 @@ mod tests { } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } decl_event!( @@ -570,7 +570,7 @@ mod tests { } mod system_renamed { - pub trait Trait: 'static { + pub trait Config: 'static { type Origin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; @@ -578,7 +578,7 @@ mod tests { } decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } decl_event!( @@ -591,17 +591,17 @@ mod tests { mod event_module { use super::system; - pub trait Trait: system::Trait { + pub trait Config: system::Config { type Balance; } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=system {} + pub struct Module for enum Call where origin: T::Origin, system=system {} } decl_event!( /// Event without renaming the generic parameter `Balance` and `Origin`. - pub enum Event where ::Balance, ::Origin + pub enum Event where ::Balance, ::Origin { /// Hi, I am a comment. TestEvent(Balance, Origin), @@ -614,19 +614,19 @@ mod tests { mod event_module2 { use super::system; - pub trait Trait: system::Trait { + pub trait Config: system::Config { type Balance; } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=system {} + pub struct Module for enum Call where origin: T::Origin, system=system {} } decl_event!( /// Event with renamed generic parameter pub enum Event where - BalanceRenamed = ::Balance, - OriginRenamed = ::Origin + BalanceRenamed = ::Balance, + OriginRenamed = ::Origin { TestEvent(BalanceRenamed), TestOrigin(OriginRenamed), @@ -645,19 +645,19 @@ mod tests { mod event_module4 { use super::system; - pub trait Trait: system::Trait { + pub trait Config: system::Config { type Balance; } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=system {} + pub struct Module for enum Call where origin: T::Origin, system=system {} } decl_event!( /// Event finish formatting on an unnamed one with trailing comma pub enum Event where - ::Balance, - ::Origin, + ::Balance, + ::Origin, { TestEvent(Balance, Origin), } @@ -667,19 +667,19 @@ mod tests { mod event_module5 { use super::system; - pub trait Trait: system::Trait { + pub trait Config: system::Config { type Balance; } decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=system {} + pub struct Module for enum Call where origin: T::Origin, system=system {} } decl_event!( /// Event finish formatting on an named one with trailing comma pub enum Event where - BalanceRenamed = ::Balance, - OriginRenamed = ::Origin, + BalanceRenamed = ::Balance, + OriginRenamed = ::Origin, { TestEvent(BalanceRenamed, OriginRenamed), TrailingCommaInArgs( @@ -714,37 +714,37 @@ mod tests { } } - impl event_module::Trait for TestRuntime { + impl event_module::Config for TestRuntime { type Balance = u32; } - impl event_module2::Trait for TestRuntime { + impl event_module2::Config for TestRuntime { type Balance = u32; } - impl system::Trait for TestRuntime { + impl system::Config for TestRuntime { type Origin = u32; type BlockNumber = u32; type PalletInfo = (); type DbWeight = (); } - impl event_module::Trait for TestRuntime2 { + impl event_module::Config for TestRuntime2 { type Balance = u32; } - impl event_module2::Trait for TestRuntime2 { + impl event_module2::Config for TestRuntime2 { type Balance = u32; } - impl system_renamed::Trait for TestRuntime2 { + impl system_renamed::Config for TestRuntime2 { type Origin = u32; type BlockNumber = u32; type PalletInfo = (); type DbWeight = (); } - impl system::Trait for TestRuntime2 { + impl system::Config for TestRuntime2 { type Origin = u32; type BlockNumber = u32; type PalletInfo = (); diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 5dd452dbbe7b94554cbd8d7b9f9834cab475fe88..55bca2610a18b3cc9103f4c079d9e2ae5d24701a 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -102,9 +102,8 @@ pub enum Never {} /// /// - Using `static` to create a static parameter type. Its value is /// being provided by a static variable with the equivalent name in `UPPER_SNAKE_CASE`. An -/// additional `set` function is provided in this case to alter the static variable. -/// -/// **This is intended for testing ONLY and is ONLY available when `std` is enabled** +/// additional `set` function is provided in this case to alter the static variable. +/// **This is intended for testing ONLY and is ONLY available when `std` is enabled.** /// /// # Examples /// @@ -347,14 +346,14 @@ pub use frame_support_procedural::{ /// This is useful for type generic over runtime: /// ``` /// # use frame_support::CloneNoBound; -/// trait Trait { +/// trait Config { /// type C: Clone; /// } /// /// // Foo implements [`Clone`] because `C` bounds [`Clone`]. /// // Otherwise compilation will fail with an output telling `c` doesn't implement [`Clone`]. /// #[derive(CloneNoBound)] -/// struct Foo { +/// struct Foo { /// c: T::C, /// } /// ``` @@ -365,14 +364,14 @@ pub use frame_support_procedural::CloneNoBound; /// This is useful for type generic over runtime: /// ``` /// # use frame_support::{EqNoBound, PartialEqNoBound}; -/// trait Trait { +/// trait Config { /// type C: Eq; /// } /// /// // Foo implements [`Eq`] because `C` bounds [`Eq`]. /// // Otherwise compilation will fail with an output telling `c` doesn't implement [`Eq`]. /// #[derive(PartialEqNoBound, EqNoBound)] -/// struct Foo { +/// struct Foo { /// c: T::C, /// } /// ``` @@ -383,14 +382,14 @@ pub use frame_support_procedural::EqNoBound; /// This is useful for type generic over runtime: /// ``` /// # use frame_support::PartialEqNoBound; -/// trait Trait { +/// trait Config { /// type C: PartialEq; /// } /// /// // Foo implements [`PartialEq`] because `C` bounds [`PartialEq`]. 
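For reference, a minimal self-contained sketch of the `CloneNoBound` pattern documented above, with the generic parameters written out in full; `Config`, `Foo`, and `C` are the illustrative names from that doc example.

```rust
use frame_support::CloneNoBound;

trait Config {
    type C: Clone;
}

// `Foo` gets a `Clone` impl because `T::C` is bound to `Clone`; no `T: Clone`
// bound is added, which is exactly what the `*NoBound` derives are for.
#[derive(CloneNoBound)]
struct Foo<T: Config> {
    c: T::C,
}
```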
/// // Otherwise compilation will fail with an output telling `c` doesn't implement [`PartialEq`]. /// #[derive(PartialEqNoBound)] -/// struct Foo { +/// struct Foo { /// c: T::C, /// } /// ``` @@ -402,14 +401,14 @@ pub use frame_support_procedural::PartialEqNoBound; /// ``` /// # use frame_support::DebugNoBound; /// # use core::fmt::Debug; -/// trait Trait { +/// trait Config { /// type C: Debug; /// } /// /// // Foo implements [`Debug`] because `C` bounds [`Debug`]. /// // Otherwise compilation will fail with an output telling `c` doesn't implement [`Debug`]. /// #[derive(DebugNoBound)] -/// struct Foo { +/// struct Foo { /// c: T::C, /// } /// ``` @@ -488,7 +487,6 @@ macro_rules! ensure { /// /// Used as `assert_noop(expression_to_assert, expected_error_expression)`. #[macro_export] -#[cfg(feature = "std")] macro_rules! assert_noop { ( $x:expr, @@ -504,7 +502,6 @@ macro_rules! assert_noop { /// /// Used as `assert_err!(expression_to_assert, expected_error_expression)` #[macro_export] -#[cfg(feature = "std")] macro_rules! assert_err { ( $x:expr , $y:expr $(,)? ) => { assert_eq!($x, Err($y.into())); @@ -516,7 +513,6 @@ macro_rules! assert_err { /// This can be used on`DispatchResultWithPostInfo` when the post info should /// be ignored. #[macro_export] -#[cfg(feature = "std")] macro_rules! assert_err_ignore_postinfo { ( $x:expr , $y:expr $(,)? ) => { $crate::assert_err!($x.map(|_| ()).map_err(|e| e.error), $y); @@ -525,7 +521,6 @@ macro_rules! assert_err_ignore_postinfo { /// Assert an expression returns error with the given weight. #[macro_export] -#[cfg(feature = "std")] macro_rules! assert_err_with_weight { ($call:expr, $err:expr, $weight:expr $(,)? ) => { if let Err(dispatch_err_with_post) = $call { @@ -542,7 +537,6 @@ macro_rules! assert_err_with_weight { /// Used as `assert_ok!(expression_to_assert, expected_ok_expression)`, /// or `assert_ok!(expression_to_assert)` which would assert against `Ok(())`. #[macro_export] -#[cfg(feature = "std")] macro_rules! assert_ok { ( $x:expr $(,)? ) => { let is = $x; @@ -571,7 +565,7 @@ mod tests { use sp_std::{marker::PhantomData, result}; use sp_io::TestExternalities; - pub trait Trait: 'static { + pub trait Config: 'static { type BlockNumber: Codec + EncodeLike + Default; type Origin; type PalletInfo: crate::traits::PalletInfo; @@ -581,16 +575,16 @@ mod tests { mod module { #![allow(dead_code)] - use super::Trait; + use super::Config; decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } } use self::module::Module; decl_storage! 
{ - trait Store for Module as Test { + trait Store for Module as Test { pub Data get(fn data) build(|_| vec![(15u32, 42u64)]): map hasher(twox_64_concat) u32 => u64; pub OptionLinkedMap: map hasher(blake2_128_concat) u32 => Option; @@ -612,7 +606,7 @@ mod tests { } struct Test; - impl Trait for Test { + impl Config for Test { type BlockNumber = u32; type Origin = u32; type PalletInfo = (); diff --git a/frame/support/src/metadata.rs b/frame/support/src/metadata.rs index 80737e4b13d6f9cd519a79cb3341c9dbaf8d5f70..f72365985da0a969263b657c8068b0f013dbd34a 100644 --- a/frame/support/src/metadata.rs +++ b/frame/support/src/metadata.rs @@ -27,23 +27,23 @@ pub use frame_metadata::{ /// Example: /// ``` ///# mod module0 { -///# pub trait Trait: 'static { +///# pub trait Config: 'static { ///# type Origin; ///# type BlockNumber; ///# type PalletInfo: frame_support::traits::PalletInfo; ///# type DbWeight: frame_support::traits::Get; ///# } ///# frame_support::decl_module! { -///# pub struct Module for enum Call where origin: T::Origin, system=self {} +///# pub struct Module for enum Call where origin: T::Origin, system=self {} ///# } ///# ///# frame_support::decl_storage! { -///# trait Store for Module as TestStorage {} +///# trait Store for Module as TestStorage {} ///# } ///# } ///# use module0 as module1; ///# use module0 as module2; -///# impl module0::Trait for Runtime { +///# impl module0::Config for Runtime { ///# type Origin = u32; ///# type BlockNumber = u32; ///# type PalletInfo = (); @@ -297,7 +297,7 @@ mod tests { mod system { use super::*; - pub trait Trait: 'static { + pub trait Config: 'static { type BaseCallFilter; const ASSOCIATED_CONST: u64 = 500; type Origin: Into, Self::Origin>> @@ -311,7 +311,7 @@ mod tests { } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { /// Hi, I am a comment. const BlockNumber: T::BlockNumber = 100.into(); const GetType: T::AccountId = T::SomeValue::get().into(); @@ -341,19 +341,19 @@ mod tests { } } - pub type Origin = RawOrigin<::AccountId>; + pub type Origin = RawOrigin<::AccountId>; } mod event_module { use crate::dispatch::DispatchResult; use super::system; - pub trait Trait: system::Trait { + pub trait Config: system::Config { type Balance; } decl_event!( - pub enum Event where ::Balance + pub enum Event where ::Balance { /// Hi, I am a comment. TestEvent(Balance), @@ -361,7 +361,7 @@ mod tests { ); decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=system { + pub struct Module for enum Call where origin: T::Origin, system=system { type Error = Error; #[weight = 0] @@ -370,7 +370,7 @@ mod tests { } crate::decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Some user input error UserInputError, /// Something bad happened @@ -383,23 +383,23 @@ mod tests { mod event_module2 { use super::system; - pub trait Trait: system::Trait { + pub trait Config: system::Config { type Balance; } decl_event!( - pub enum Event where ::Balance + pub enum Event where ::Balance { TestEvent(Balance), } ); decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=system {} + pub struct Module for enum Call where origin: T::Origin, system=system {} } crate::decl_storage! 
{ - trait Store for Module as TestStorage { + trait Store for Module as TestStorage { StorageMethod : Option; } add_extra_genesis { @@ -433,11 +433,11 @@ mod tests { } } - impl event_module::Trait for TestRuntime { + impl event_module::Config for TestRuntime { type Balance = u32; } - impl event_module2::Trait for TestRuntime { + impl event_module2::Config for TestRuntime { type Balance = u32; } @@ -445,7 +445,7 @@ mod tests { pub const SystemValue: u32 = 600; } - impl system::Trait for TestRuntime { + impl system::Config for TestRuntime { type BaseCallFilter = (); type Origin = Origin; type AccountId = u32; @@ -480,7 +480,7 @@ mod tests { struct ConstantAssociatedConstByteGetter; impl DefaultByte for ConstantAssociatedConstByteGetter { fn default_byte(&self) -> Vec { - ::ASSOCIATED_CONST.encode() + ::ASSOCIATED_CONST.encode() } } diff --git a/frame/support/src/origin.rs b/frame/support/src/origin.rs index b96a56c8e1d8f81a102cd0f37927b70c810638f1..980ab902a389d128bf760054b9a807091f93d536 100644 --- a/frame/support/src/origin.rs +++ b/frame/support/src/origin.rs @@ -181,12 +181,12 @@ macro_rules! impl_outer_origin { index { $( $index:tt )? }, )* ) => { - // WARNING: All instance must hold the filter `frame_system::Trait::BaseCallFilter`, except + // WARNING: All instance must hold the filter `frame_system::Config::BaseCallFilter`, except // when caller is system Root. One can use `OriginTrait::reset_filter` to do so. #[derive(Clone)] pub struct $name { caller: $caller_name, - filter: $crate::sp_std::rc::Rc::Call) -> bool>>, + filter: $crate::sp_std::rc::Rc::Call) -> bool>>, } #[cfg(not(feature = "std"))] @@ -213,9 +213,9 @@ macro_rules! impl_outer_origin { } impl $crate::traits::OriginTrait for $name { - type Call = <$runtime as $system::Trait>::Call; + type Call = <$runtime as $system::Config>::Call; type PalletsOrigin = $caller_name; - type AccountId = <$runtime as $system::Trait>::AccountId; + type AccountId = <$runtime as $system::Config>::AccountId; fn add_filter(&mut self, filter: impl Fn(&Self::Call) -> bool + 'static) { let f = self.filter.clone(); @@ -227,8 +227,8 @@ macro_rules! impl_outer_origin { fn reset_filter(&mut self) { let filter = < - <$runtime as $system::Trait>::BaseCallFilter - as $crate::traits::Filter<<$runtime as $system::Trait>::Call> + <$runtime as $system::Config>::BaseCallFilter + as $crate::traits::Filter<<$runtime as $system::Config>::Call> >::filter; self.filter = $crate::sp_std::rc::Rc::new(Box::new(filter)); @@ -246,7 +246,7 @@ macro_rules! impl_outer_origin { &self.caller } - /// Create with system none origin and `frame-system::Trait::BaseCallFilter`. + /// Create with system none origin and `frame-system::Config::BaseCallFilter`. fn none() -> Self { $system::RawOrigin::None.into() } @@ -254,8 +254,8 @@ macro_rules! impl_outer_origin { fn root() -> Self { $system::RawOrigin::Root.into() } - /// Create with system signed origin and `frame-system::Trait::BaseCallFilter`. - fn signed(by: <$runtime as $system::Trait>::AccountId) -> Self { + /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. + fn signed(by: <$runtime as $system::Config>::AccountId) -> Self { $system::RawOrigin::Signed(by).into() } } @@ -280,7 +280,7 @@ macro_rules! impl_outer_origin { // For backwards compatibility and ease of accessing these functions. #[allow(dead_code)] impl $name { - /// Create with system none origin and `frame-system::Trait::BaseCallFilter`. + /// Create with system none origin and `frame-system::Config::BaseCallFilter`. 
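The origin type generated above resets its call filter to `frame_system::Config::BaseCallFilter`, which is simply a `frame_support::traits::Filter` over the runtime call. A small sketch of such a filter, assuming the `Filter` trait's single `filter` method; the type name is made up.

```rust
use frame_support::traits::Filter;

/// A hypothetical base call filter that blocks every call.
pub struct DenyAllCalls;

impl<Call> Filter<Call> for DenyAllCalls {
    // Returning `true` lets a call through, `false` blocks it.
    fn filter(_call: &Call) -> bool {
        false
    }
}
```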
pub fn none() -> Self { <$name as $crate::traits::OriginTrait>::none() } @@ -288,8 +288,8 @@ macro_rules! impl_outer_origin { pub fn root() -> Self { <$name as $crate::traits::OriginTrait>::root() } - /// Create with system signed origin and `frame-system::Trait::BaseCallFilter`. - pub fn signed(by: <$runtime as $system::Trait>::AccountId) -> Self { + /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. + pub fn signed(by: <$runtime as $system::Config>::AccountId) -> Self { <$name as $crate::traits::OriginTrait>::signed(by) } } @@ -302,7 +302,7 @@ macro_rules! impl_outer_origin { impl From<$system::Origin<$runtime>> for $name { /// Convert to runtime origin: /// * root origin is built with no filter - /// * others use `frame-system::Trait::BaseCallFilter` + /// * others use `frame-system::Config::BaseCallFilter` fn from(x: $system::Origin<$runtime>) -> Self { let o: $caller_name = x.into(); o.into() @@ -335,10 +335,10 @@ macro_rules! impl_outer_origin { } } } - impl From::AccountId>> for $name { + impl From::AccountId>> for $name { /// Convert to runtime origin with caller being system signed or none and use filter - /// `frame-system::Trait::BaseCallFilter`. - fn from(x: Option<<$runtime as $system::Trait>::AccountId>) -> Self { + /// `frame-system::Config::BaseCallFilter`. + fn from(x: Option<<$runtime as $system::Config>::AccountId>) -> Self { <$system::Origin<$runtime>>::from(x).into() } } @@ -352,7 +352,7 @@ macro_rules! impl_outer_origin { } impl From<$module::Origin < $( $generic )? $(, $module::$generic_instance )? > > for $name { - /// Convert to runtime origin using `frame-system::Trait::BaseCallFilter`. + /// Convert to runtime origin using `frame-system::Config::BaseCallFilter`. fn from(x: $module::Origin < $( $generic )? $(, $module::$generic_instance )? >) -> Self { let x: $caller_name = x.into(); x.into() @@ -388,7 +388,7 @@ mod tests { mod frame_system { use super::*; - pub trait Trait { + pub trait Config { type AccountId; type Call; type BaseCallFilter; @@ -410,7 +410,7 @@ mod tests { } } - pub type Origin = RawOrigin<::AccountId>; + pub type Origin = RawOrigin<::AccountId>; } mod origin_without_generic { @@ -439,7 +439,7 @@ mod tests { } } - impl frame_system::Trait for TestRuntime { + impl frame_system::Config for TestRuntime { type AccountId = u32; type Call = u32; type BaseCallFilter = BaseCallFilter; diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index 431b5e09303844db6bf212e325e251b22708c642..d98615544727278596b76959c315b4d49510cff8 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -25,6 +25,14 @@ use crate::sp_std::prelude::*; use codec::{Codec, Encode, Decode}; pub use sp_core::storage::{ChildInfo, ChildType}; +/// The outcome of calling [`kill_storage`]. +pub enum KillOutcome { + /// No key remains in the child trie. + AllRemoved, + /// At least one key still resides in the child trie due to the supplied limit. + SomeRemaining, +} + /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get( child_info: &ChildInfo, @@ -148,13 +156,37 @@ pub fn exists( } /// Remove all `storage_key` key/values +/// +/// Deletes all keys from the overlay and up to `limit` keys from the backend if +/// it is set to `Some`. No limit is applied when `limit` is set to `None`. +/// +/// The limit can be used to partially delete a child trie in case it is too large +/// to delete in one go (block). 
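A sketch of how the limited `kill_storage` described here might be driven from pallet code; the child trie name is illustrative and the `u32` limit type is an assumption, since only `Option` is visible in this hunk.

```rust
use frame_support::storage::child::{self, ChildInfo, KillOutcome};

/// Delete at most 100 backend keys of a (large) child trie per call, so the
/// deletion can be spread over several blocks (trie name is illustrative).
fn clear_big_child_trie() {
    let info = ChildInfo::new_default(b"illustrative-child-trie");

    match child::kill_storage(&info, Some(100u32)) {
        KillOutcome::AllRemoved => {
            // Nothing left: the child trie is fully removed.
        }
        KillOutcome::SomeRemaining => {
            // The limit was hit: call again in a later block.
        }
    }
}
```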
+/// +/// # Note +/// +/// Please note that keys that are residing in the overlay for that child trie when +/// issuing this call are all deleted without counting towards the `limit`. Only keys +/// written during the current block are part of the overlay. Deleting with a `limit` +/// mostly makes sense with an empty overlay for that child trie. +/// +/// Calling this function multiple times per block for the same `storage_key` does +/// not make much sense because it is not cumulative when called inside the same block. +/// Use this function to distribute the deletion of a single child trie across multiple +/// blocks. pub fn kill_storage( child_info: &ChildInfo, -) { - match child_info.child_type() { + limit: Option, +) -> KillOutcome { + let all_removed = match child_info.child_type() { ChildType::ParentKeyId => sp_io::default_child_storage::storage_kill( child_info.storage_key(), + limit ), + }; + match all_removed { + true => KillOutcome::AllRemoved, + false => KillOutcome::SomeRemaining, } } diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index cbc62c83de8861a99b60f856c34750f4382ea727..6fb3abca5ca7f3357ba3b6fe157e841d2b2237d6 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -425,7 +425,7 @@ mod test_iterators { storage::{generator::StorageDoubleMap, IterableStorageDoubleMap, unhashed}, }; - pub trait Trait: 'static { + pub trait Config: 'static { type Origin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; @@ -433,14 +433,14 @@ mod test_iterators { } crate::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } #[derive(PartialEq, Eq, Clone, Encode, Decode)] struct NoDef(u32); crate::decl_storage! { - trait Store for Module as Test { + trait Store for Module as Test { DoubleMap: double_map hasher(blake2_128_concat) u16, hasher(twox_64_concat) u32 => u64; } } diff --git a/frame/support/src/storage/generator/map.rs b/frame/support/src/storage/generator/map.rs index 601fd4c4a8dd2c1a851284e9075e535c42ff3efc..2c2390865d02fec56a32073db0a20ac7983cb5ba 100644 --- a/frame/support/src/storage/generator/map.rs +++ b/frame/support/src/storage/generator/map.rs @@ -325,7 +325,7 @@ mod test_iterators { storage::{generator::StorageMap, IterableStorageMap, unhashed}, }; - pub trait Trait: 'static { + pub trait Config: 'static { type Origin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; @@ -333,14 +333,14 @@ mod test_iterators { } crate::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } #[derive(PartialEq, Eq, Clone, Encode, Decode)] struct NoDef(u32); crate::decl_storage! 
{ - trait Store for Module as Test { + trait Store for Module as Test { Map: map hasher(blake2_128_concat) u16 => u64; } } diff --git a/frame/support/src/storage/generator/mod.rs b/frame/support/src/storage/generator/mod.rs index 9346718f6348132b491ab7a7f2aee07f5ff2cd8b..4b444ce074f001ceba2ca6cf8ceceb33844d7517 100644 --- a/frame/support/src/storage/generator/mod.rs +++ b/frame/support/src/storage/generator/mod.rs @@ -42,14 +42,14 @@ mod tests { struct Runtime; - pub trait Trait: 'static { + pub trait Config: 'static { type Origin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; type DbWeight: crate::traits::Get; } - impl Trait for Runtime { + impl Config for Runtime { type Origin = u32; type BlockNumber = u32; type PalletInfo = (); @@ -57,11 +57,11 @@ mod tests { } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } crate::decl_storage! { - trait Store for Module as Runtime { + trait Store for Module as Runtime { Value get(fn value) config(): (u64, u64); NumberMap: map hasher(identity) u32 => u64; DoubleMap: double_map hasher(identity) u32, hasher(identity) u32 => u64; diff --git a/frame/support/src/storage/types/double_map.rs b/frame/support/src/storage/types/double_map.rs index 8e315cef85cb0e4d157f059cd5886b546e06b582..3e37c0522e321b832c18c63d9779bb84405edb28 100644 --- a/frame/support/src/storage/types/double_map.rs +++ b/frame/support/src/storage/types/double_map.rs @@ -34,7 +34,7 @@ use sp_std::vec::Vec; /// /// Each value is stored at: /// ```nocompile -/// Twox128(::name()) +/// Twox128(Prefix::pallet_prefix()) /// ++ Twox128(Prefix::STORAGE_PREFIX) /// ++ Hasher1(encode(key1)) /// ++ Hasher2(encode(key2)) @@ -68,8 +68,7 @@ where type Hasher1 = Hasher1; type Hasher2 = Hasher2; fn module_prefix() -> &'static [u8] { - ::name::() - .expect("Every active pallet has a name in the runtime; qed").as_bytes() + Prefix::pallet_prefix().as_bytes() } fn storage_prefix() -> &'static [u8] { Prefix::STORAGE_PREFIX.as_bytes() @@ -415,8 +414,7 @@ mod test { struct Prefix; impl StorageInstance for Prefix { - type Pallet = (); - type PalletInfo = (); + fn pallet_prefix() -> &'static str { "test" } const STORAGE_PREFIX: &'static str = "foo"; } diff --git a/frame/support/src/storage/types/map.rs b/frame/support/src/storage/types/map.rs index d28b7dbaa7e25472ca572ff9a75807f9c7fb5057..64f9ff4b052abc9b08063a5122a700781d77d372 100644 --- a/frame/support/src/storage/types/map.rs +++ b/frame/support/src/storage/types/map.rs @@ -33,7 +33,7 @@ use sp_std::prelude::*; /// /// Each value is stored at: /// ```nocompile -/// Twox128(::name()) +/// Twox128(Prefix::pallet_prefix()) /// ++ Twox128(Prefix::STORAGE_PREFIX) /// ++ Hasher1(encode(key)) /// ``` @@ -60,8 +60,7 @@ where type Query = QueryKind::Query; type Hasher = Hasher; fn module_prefix() -> &'static [u8] { - ::name::() - .expect("Every active pallet has a name in the runtime; qed").as_bytes() + Prefix::pallet_prefix().as_bytes() } fn storage_prefix() -> &'static [u8] { Prefix::STORAGE_PREFIX.as_bytes() @@ -318,8 +317,7 @@ mod test { struct Prefix; impl StorageInstance for Prefix { - type Pallet = (); - type PalletInfo = (); + fn pallet_prefix() -> &'static str { "test" } const STORAGE_PREFIX: &'static str = "foo"; } diff --git a/frame/support/src/storage/types/value.rs b/frame/support/src/storage/types/value.rs index da80963b28f33c191afec5dd035367cf1830c3b1..649b7b9fd272b5dc37735b8d835dbe5e570d957b 100644 --- 
a/frame/support/src/storage/types/value.rs +++ b/frame/support/src/storage/types/value.rs @@ -31,7 +31,7 @@ use frame_metadata::{DefaultByteGetter, StorageEntryModifier}; /// /// Each value is stored at: /// ```nocompile -/// Twox128(::name()) ++ Twox128(Prefix::STORAGE_PREFIX) +/// Twox128(Prefix::pallet_prefix()) ++ Twox128(Prefix::STORAGE_PREFIX) /// ``` pub struct StorageValue( core::marker::PhantomData<(Prefix, Value, QueryKind, OnEmpty)> @@ -47,8 +47,7 @@ where { type Query = QueryKind::Query; fn module_prefix() -> &'static [u8] { - ::name::() - .expect("Every active pallet has a name in the runtime; qed").as_bytes() + Prefix::pallet_prefix().as_bytes() } fn storage_prefix() -> &'static [u8] { Prefix::STORAGE_PREFIX.as_bytes() @@ -201,8 +200,7 @@ mod test { struct Prefix; impl StorageInstance for Prefix { - type Pallet = (); - type PalletInfo = (); + fn pallet_prefix() -> &'static str { "test" } const STORAGE_PREFIX: &'static str = "foo"; } diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index b40ebe3dba67c8ac78667eeba0a015e355a352c4..718f1d6354a3af9c35ff594aa1f9ab806da333f6 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -1653,16 +1653,16 @@ pub trait EnsureOrigin { /// Implemented for pallet dispatchable type by `decl_module` and for runtime dispatchable by /// `construct_runtime` and `impl_outer_dispatch`. pub trait UnfilteredDispatchable { - /// The origin type of the runtime, (i.e. `frame_system::Trait::Origin`). + /// The origin type of the runtime, (i.e. `frame_system::Config::Origin`). type Origin; /// Dispatch this call but do not check the filter in origin. fn dispatch_bypass_filter(self, origin: Self::Origin) -> crate::dispatch::DispatchResultWithPostInfo; } -/// Methods available on `frame_system::Trait::Origin`. +/// Methods available on `frame_system::Config::Origin`. pub trait OriginTrait: Sized { - /// Runtime call type, as in `frame_system::Trait::Call` + /// Runtime call type, as in `frame_system::Config::Call` type Call; /// The caller origin, overarching type of all pallets origins. @@ -1674,7 +1674,7 @@ pub trait OriginTrait: Sized { /// Add a filter to the origin. fn add_filter(&mut self, filter: impl Fn(&Self::Call) -> bool + 'static); - /// Reset origin filters to default one, i.e `frame_system::Trait::BaseCallFilter`. + /// Reset origin filters to default one, i.e `frame_system::Config::BaseCallFilter`. fn reset_filter(&mut self); /// Replace the caller with caller from the other origin @@ -1686,13 +1686,13 @@ pub trait OriginTrait: Sized { /// Get the caller. fn caller(&self) -> &Self::PalletsOrigin; - /// Create with system none origin and `frame-system::Trait::BaseCallFilter`. + /// Create with system none origin and `frame-system::Config::BaseCallFilter`. fn none() -> Self; /// Create with system root origin and no filter. fn root() -> Self; - /// Create with system signed origin and `frame-system::Trait::BaseCallFilter`. + /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. fn signed(by: Self::AccountId) -> Self; } @@ -1731,13 +1731,19 @@ pub trait Instance: 'static { const PREFIX: &'static str; } -/// An instance of a storage. +/// An instance of a storage in a pallet. /// -/// It is required the the couple `(PalletInfo::name(), STORAGE_PREFIX)` is unique. -/// Any storage with same couple will collide. +/// Define an instance for an individual storage inside a pallet. 
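The key layout shown for these storage types is `Twox128(Prefix::pallet_prefix()) ++ Twox128(Prefix::STORAGE_PREFIX) ++ ...`; a small sketch of computing that common prefix for the `"test"` / `"foo"` instance used in the tests above (the helper is hypothetical, only `sp_core::hashing::twox_128` is real).

```rust
use sp_core::hashing::twox_128;

/// Concatenate the two 16-byte Twox128 hashes that prefix every key of a
/// storage item (hypothetical helper mirroring the layout described above).
fn storage_item_prefix(pallet_prefix: &str, storage_prefix: &str) -> Vec<u8> {
    let mut prefix = Vec::with_capacity(32);
    prefix.extend_from_slice(&twox_128(pallet_prefix.as_bytes()));
    prefix.extend_from_slice(&twox_128(storage_prefix.as_bytes()));
    prefix
}

// Every key of the `foo` storage in the `test` pallet starts with:
// storage_item_prefix("test", "foo")
```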
+/// The pallet prefix is used to isolate the storage between pallets, and the storage prefix is +/// used to isolate storages inside a pallet. +/// +/// NOTE: These information can be used to define storages in pallet such as a `StorageMap` which +/// can use keys after `twox_128(pallet_prefix())++twox_128(STORAGE_PREFIX)` pub trait StorageInstance { - type Pallet: 'static; - type PalletInfo: PalletInfo; + /// Prefix of a pallet to isolate it from other pallets. + fn pallet_prefix() -> &'static str; + + /// Prefix given to a storage to isolate from other storages in the pallet. const STORAGE_PREFIX: &'static str; } @@ -1895,7 +1901,7 @@ impl PalletVersion { /// Returns the storage key for a pallet version. /// - /// See [`PALLET_VERSION_STORAGE_KEY_POSTIFX`] on how this key is built. + /// See [`PALLET_VERSION_STORAGE_KEY_POSTFIX`] on how this key is built. /// /// Returns `None` if the given `PI` returned a `None` as name for the given /// `Pallet`. diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 74f0773aa541fc25d93adb12f883bfde2d89f4c3..d4dda427ef1c271d4c4729667cd1527202e139c1 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -39,9 +39,9 @@ //! `Yes`**. //! //! ``` -//! # use frame_system::Trait; +//! # use frame_system::Config; //! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 1000] //! fn dispatching(origin) { unimplemented!() } //! } @@ -52,10 +52,10 @@ //! 2.1 Define weight and class, **in which case `PaysFee` would be `Yes`**. //! //! ``` -//! # use frame_system::Trait; +//! # use frame_system::Config; //! # use frame_support::weights::DispatchClass; //! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = (1000, DispatchClass::Operational)] //! fn dispatching(origin) { unimplemented!() } //! } @@ -66,10 +66,10 @@ //! 2.2 Define weight and `PaysFee`, **in which case `ClassifyDispatch` would be `Normal`**. //! //! ``` -//! # use frame_system::Trait; +//! # use frame_system::Config; //! # use frame_support::weights::Pays; //! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = (1000, Pays::No)] //! fn dispatching(origin) { unimplemented!() } //! } @@ -80,10 +80,10 @@ //! 3. Define all 3 parameters. //! //! ``` -//! # use frame_system::Trait; +//! # use frame_system::Config; //! # use frame_support::weights::{DispatchClass, Pays}; //! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = (1000, DispatchClass::Operational, Pays::No)] //! fn dispatching(origin) { unimplemented!() } //! } @@ -100,10 +100,10 @@ //! all 3 are static values, providing a raw tuple is easier. //! //! ``` -//! # use frame_system::Trait; +//! # use frame_system::Config; //! # use frame_support::weights::{DispatchClass, FunctionOf, Pays}; //! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = FunctionOf( //! // weight, function. //! |args: (&u32, &u64)| *args.0 as u64 + args.1, @@ -213,6 +213,9 @@ impl Default for Pays { } /// A generalized group of dispatch types. 
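The annotation forms listed above (weight only, weight plus class, weight plus `Pays`, or all three) attach directly to a dispatchable. A minimal sketch of the three-element form against the renamed `Config` trait; the function name is illustrative.

```rust
use frame_support::{decl_module, weights::{DispatchClass, Pays}};
use frame_system::Config;

decl_module! {
    pub struct Module<T: Config> for enum Call where origin: T::Origin {
        // Fixed weight, `Operational` class, and the caller pays no fee.
        #[weight = (1_000, DispatchClass::Operational, Pays::No)]
        fn emergency_dispatch(origin) {
            let _ = origin;
        }
    }
}
```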
+/// +/// NOTE whenever upgrading the enum make sure to also update +/// [DispatchClass::all] and [DispatchClass::non_mandatory] helper functions. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] #[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)] @@ -242,6 +245,39 @@ impl Default for DispatchClass { } } +impl DispatchClass { + /// Returns an array containing all dispatch classes. + pub fn all() -> &'static [DispatchClass] { + &[DispatchClass::Normal, DispatchClass::Operational, DispatchClass::Mandatory] + } + + /// Returns an array of all dispatch classes except `Mandatory`. + pub fn non_mandatory() -> &'static [DispatchClass] { + &[DispatchClass::Normal, DispatchClass::Operational] + } +} + +/// A trait that represents one or many values of given type. +/// +/// Useful to accept as parameter type to let the caller pass either a single value directly +/// or an iterator. +pub trait OneOrMany { + /// The iterator type. + type Iter: Iterator; + /// Convert this item into an iterator. + fn into_iter(self) -> Self::Iter; +} + +impl OneOrMany for DispatchClass { + type Iter = sp_std::iter::Once; + fn into_iter(self) -> Self::Iter { sp_std::iter::once(self) } +} + +impl<'a> OneOrMany for &'a [DispatchClass] { + type Iter = sp_std::iter::Cloned>; + fn into_iter(self) -> Self::Iter { self.iter().cloned() } +} + /// Primitives related to priority management of Frame. pub mod priority { /// The starting point of all Operational transactions. 3/4 of u64::max_value(). @@ -695,13 +731,94 @@ impl WeightToFeePolynomial for IdentityFee where } } +/// A struct holding value for each `DispatchClass`. +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] +pub struct PerDispatchClass { + /// Value for `Normal` extrinsics. + normal: T, + /// Value for `Operational` extrinsics. + operational: T, + /// Value for `Mandatory` extrinsics. + mandatory: T, +} + +impl PerDispatchClass { + /// Create new `PerDispatchClass` with the same value for every class. + pub fn new(val: impl Fn(DispatchClass) -> T) -> Self { + Self { + normal: val(DispatchClass::Normal), + operational: val(DispatchClass::Operational), + mandatory: val(DispatchClass::Mandatory), + } + } + + /// Get a mutable reference to current value of given class. + pub fn get_mut(&mut self, class: DispatchClass) -> &mut T { + match class { + DispatchClass::Operational => &mut self.operational, + DispatchClass::Normal => &mut self.normal, + DispatchClass::Mandatory => &mut self.mandatory, + } + } + + /// Get current value for given class. + pub fn get(&self, class: DispatchClass) -> &T { + match class { + DispatchClass::Normal => &self.normal, + DispatchClass::Operational => &self.operational, + DispatchClass::Mandatory => &self.mandatory, + } + } +} + +impl PerDispatchClass { + /// Set the value of given class. + pub fn set(&mut self, new: T, class: impl OneOrMany) { + for class in class.into_iter() { + *self.get_mut(class) = new.clone(); + } + } +} + +impl PerDispatchClass { + /// Returns the total weight consumed by all extrinsics in the block. + pub fn total(&self) -> Weight { + let mut sum = 0; + for class in DispatchClass::all() { + sum = sum.saturating_add(*self.get(*class)); + } + sum + } + + /// Add some weight of a specific dispatch class, saturating at the numeric bounds of `Weight`. 
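A short usage sketch of `PerDispatchClass` together with the `OneOrMany` helper introduced above; the weights are made up, and `add`/`total` are the `Weight`-specific helpers of this impl block.

```rust
use frame_support::weights::{DispatchClass, PerDispatchClass, Weight};

fn track_block_weight() {
    // One weight counter per dispatch class, all starting at zero.
    let mut consumed: PerDispatchClass<Weight> = PerDispatchClass::new(|_| 0);

    // Saturating accumulation for specific classes.
    consumed.add(1_000, DispatchClass::Normal);
    consumed.add(250, DispatchClass::Operational);
    assert_eq!(consumed.total(), 1_250);

    // `set` takes either a single class or a slice, thanks to `OneOrMany`.
    consumed.set(0, DispatchClass::non_mandatory());
    assert_eq!(consumed.total(), 0);
}
```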
+ pub fn add(&mut self, weight: Weight, class: DispatchClass) { + let value = self.get_mut(class); + *value = value.saturating_add(weight); + } + + /// Try to add some weight of a specific dispatch class, returning Err(()) if overflow would + /// occur. + pub fn checked_add(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { + let value = self.get_mut(class); + *value = value.checked_add(weight).ok_or(())?; + Ok(()) + } + + /// Subtract some weight of a specific dispatch class, saturating at the numeric bounds of + /// `Weight`. + pub fn sub(&mut self, weight: Weight, class: DispatchClass) { + let value = self.get_mut(class); + *value = value.saturating_sub(weight); + } +} + #[cfg(test)] #[allow(dead_code)] mod tests { use crate::{decl_module, parameter_types, traits::Get}; use super::*; - pub trait Trait: 'static { + pub trait Config: 'static { type Origin; type Balance; type BlockNumber; @@ -718,7 +835,7 @@ mod tests { }; } - impl Trait for TraitImpl { + impl Config for TraitImpl { type Origin = u32; type BlockNumber = u32; type Balance = u32; @@ -727,7 +844,7 @@ mod tests { } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { // no arguments, fixed weight #[weight = 1000] fn f00(_origin) { unimplemented!(); } diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index ee8ace5c983c5034fb79030ac2e99b8155e1d9fb..01484ccfb882b6433edf1558513454f735c870fe 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -21,10 +21,11 @@ sp-inherents = { version = "2.0.0", default-features = false, path = "../../../p sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -trybuild = "1.0.33" +trybuild = { git = "https://github.com/bkchr/trybuild.git", branch = "bkchr-use-workspace-cargo-lock" } pretty_assertions = "0.6.1" rustversion = "1.0.0" frame-metadata = { version = "12.0.0", default-features = false, path = "../../metadata" } +frame-system = { version = "2.0.0", default-features = false, path = "../../system" } [features] default = ["std"] @@ -33,6 +34,7 @@ std = [ "codec/std", "sp-io/std", "frame-support/std", + "frame-system/std", "sp-inherents/std", "sp-core/std", "sp-std/std", diff --git a/frame/support/test/src/lib.rs b/frame/support/test/src/lib.rs index a917c781c065c22dff5e7dfada86eff3494608fe..2baf698f1e529603cb5522c625c726b370ee49ec 100644 --- a/frame/support/test/src/lib.rs +++ b/frame/support/test/src/lib.rs @@ -26,7 +26,7 @@ mod pallet_version; /// The configuration trait -pub trait Trait: 'static { +pub trait Config: 'static { /// The runtime origin type. type Origin: codec::Codec + codec::EncodeLike + Default; /// The block number type. @@ -39,5 +39,5 @@ pub trait Trait: 'static { frame_support::decl_module! 
{ /// Some test module - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 4ff4fc6828604ace9567f3a7fed1ae59b80c43d0..33bb4a9cc877b0fee10f414f32c995305a576be8 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -37,11 +37,11 @@ thread_local! { mod module1 { use super::*; - pub trait Trait: system::Trait {} + pub trait Config: system::Config {} frame_support::decl_module! { - pub struct Module, I: Instance = DefaultInstance> for enum Call - where origin: ::Origin, system=system + pub struct Module, I: Instance = DefaultInstance> for enum Call + where origin: ::Origin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -55,31 +55,31 @@ mod module1 { frame_support::decl_event! { pub enum Event where - ::AccountId + ::AccountId { A(AccountId), } } frame_support::decl_error! { - pub enum Error for Module, I: Instance> { + pub enum Error for Module, I: Instance> { Something } } frame_support::decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Module {} + trait Store for Module, I: Instance=DefaultInstance> as Module {} } } mod module2 { use super::*; - pub trait Trait: system::Trait {} + pub trait Config: system::Config {} frame_support::decl_module! { - pub struct Module for enum Call - where origin: ::Origin, system=system + pub struct Module for enum Call + where origin: ::Origin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -102,25 +102,25 @@ mod module2 { } frame_support::decl_error! { - pub enum Error for Module { + pub enum Error for Module { Something } } frame_support::decl_storage! { - trait Store for Module as Module {} + trait Store for Module as Module {} } } -impl module1::Trait for Runtime {} -impl module2::Trait for Runtime {} +impl module1::Config for Runtime {} +impl module2::Config for Runtime {} pub type Signature = sr25519::Signature; pub type AccountId = ::Signer; pub type BlockNumber = u64; pub type Index = u64; -impl system::Trait for Runtime { +impl system::Config for Runtime { type BaseCallFilter = (); type Hash = H256; type Origin = Origin; diff --git a/frame/support/test/tests/construct_runtime_ui.rs b/frame/support/test/tests/construct_runtime_ui.rs index e1624c76830ae21849cd8dd6329476c779569bd6..83a90c96dd62443d170238dfa232156dbe77b623 100644 --- a/frame/support/test/tests/construct_runtime_ui.rs +++ b/frame/support/test/tests/construct_runtime_ui.rs @@ -21,7 +21,7 @@ use std::env; #[test] fn ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. - env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/construct_runtime_ui/*.rs"); diff --git a/frame/support/test/tests/decl_module_ui.rs b/frame/support/test/tests/decl_module_ui.rs index 7df64bc52f41261ddedef6b2510347602697eb03..22237d904aeac0dcafe4f4f16ff2432311fded69 100644 --- a/frame/support/test/tests/decl_module_ui.rs +++ b/frame/support/test/tests/decl_module_ui.rs @@ -19,7 +19,7 @@ #[test] fn decl_module_ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
- std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/decl_module_ui/*.rs"); diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs index 56eff29c5dc1bf137d3f9941606489f550781320..cc7c1ff219d8bbdcf68f4132f7c54dffd5ee5499 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs @@ -1,5 +1,5 @@ frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { fn integrity_test() {} fn integrity_test() {} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr index 25f3b891d9b47b48aabc204baea95b2b023bbc49..3bf5f58b43a3912135e2c585f2eca6bb169ee9e0 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr @@ -2,7 +2,7 @@ error: `integrity_test` can only be passed once as input. --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn integrity_test() {} 4 | | 5 | | fn integrity_test() {} @@ -16,7 +16,7 @@ error[E0601]: `main` function not found in crate `$CRATE` --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn integrity_test() {} 4 | | 5 | | fn integrity_test() {} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs index 3e1bc25c8d59c353471806a36985ead744638f83..ddde7c72c1cc55b49fc4949f335597e4759fed98 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs @@ -1,5 +1,5 @@ frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { fn on_initialize() -> Weight { 0 } diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr index 34c5ff3f941a179a664777d6e0d2c9ee84ad5e39..2911d7ded8a2380fffc10e4e636bdecdcc65de7b 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr @@ -2,7 +2,7 @@ error: `on_initialize` can only be passed once as input. --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 | 1 | / frame_support::decl_module! 
{ -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn on_initialize() -> Weight { 4 | | 0 ... | @@ -16,7 +16,7 @@ error[E0601]: `main` function not found in crate `$CRATE` --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn on_initialize() -> Weight { 4 | | 0 ... | diff --git a/frame/support/test/tests/decl_storage.rs b/frame/support/test/tests/decl_storage.rs index 8d5727ce9104b2c13e9b0a67e2833aee3a64f7a0..97cf68c799b2a091ace0fc2a3ba74b317ea0187b 100644 --- a/frame/support/test/tests/decl_storage.rs +++ b/frame/support/test/tests/decl_storage.rs @@ -24,13 +24,13 @@ mod tests { use std::marker::PhantomData; frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } - pub trait Trait: frame_support_test::Trait {} + pub trait Config: frame_support_test::Config {} frame_support::decl_storage! { - trait Store for Module as TestStorage { + trait Store for Module as TestStorage { // non-getters: pub / $default /// Hello, this is doc! @@ -81,14 +81,14 @@ mod tests { struct TraitImpl {} - impl frame_support_test::Trait for TraitImpl { + impl frame_support_test::Config for TraitImpl { type Origin = u32; type BlockNumber = u32; type PalletInfo = (); type DbWeight = (); } - impl Trait for TraitImpl {} + impl Config for TraitImpl {} const EXPECTED_METADATA: StorageMetadata = StorageMetadata { prefix: DecodeDifferent::Encode("TestStorage"), @@ -414,16 +414,16 @@ mod tests { #[cfg(test)] #[allow(dead_code)] mod test2 { - pub trait Trait: frame_support_test::Trait {} + pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } type PairOf = (T, T); frame_support::decl_storage! { - trait Store for Module as TestStorage { + trait Store for Module as TestStorage { SingleDef : u32; PairDef : PairOf; Single : Option; @@ -438,26 +438,26 @@ mod test2 { struct TraitImpl {} - impl frame_support_test::Trait for TraitImpl { + impl frame_support_test::Config for TraitImpl { type Origin = u32; type BlockNumber = u32; type PalletInfo = (); type DbWeight = (); } - impl Trait for TraitImpl {} + impl Config for TraitImpl {} } #[cfg(test)] #[allow(dead_code)] mod test3 { - pub trait Trait: frame_support_test::Trait {} + pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage! 
{ - trait Store for Module as Test { + trait Store for Module as Test { Foo get(fn foo) config(initial_foo): u32; } } @@ -466,14 +466,14 @@ mod test3 { struct TraitImpl {} - impl frame_support_test::Trait for TraitImpl { + impl frame_support_test::Config for TraitImpl { type Origin = u32; type BlockNumber = u32; type PalletInfo = (); type DbWeight = (); } - impl Trait for TraitImpl {} + impl Config for TraitImpl {} } #[cfg(test)] @@ -482,17 +482,17 @@ mod test_append_and_len { use sp_io::TestExternalities; use codec::{Encode, Decode}; - pub trait Trait: frame_support_test::Trait {} + pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } #[derive(PartialEq, Eq, Clone, Encode, Decode)] struct NoDef(u32); frame_support::decl_storage! { - trait Store for Module as Test { + trait Store for Module as Test { NoDefault: Option; JustVec: Vec; @@ -511,14 +511,14 @@ mod test_append_and_len { struct Test {} - impl frame_support_test::Trait for Test { + impl frame_support_test::Config for Test { type Origin = u32; type BlockNumber = u32; type PalletInfo = (); type DbWeight = (); } - impl Trait for Test {} + impl Config for Test {} #[test] fn default_for_option() { diff --git a/frame/support/test/tests/decl_storage_ui.rs b/frame/support/test/tests/decl_storage_ui.rs index 56529d62c28ff7adefc43557cf4fbc75299c39a3..4b082cb8172a71acd785563181b77fffb4ae8d12 100644 --- a/frame/support/test/tests/decl_storage_ui.rs +++ b/frame/support/test/tests/decl_storage_ui.rs @@ -19,7 +19,7 @@ #[test] fn decl_storage_ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. - std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/decl_storage_ui/*.rs"); diff --git a/frame/support/test/tests/decl_storage_ui/config_duplicate.rs b/frame/support/test/tests/decl_storage_ui/config_duplicate.rs index 58923ed19297c4d78b159f3b3d4d331a8b8cd188..c7de52dd8935b36d97d698e63ed039fc8a7bb497 100644 --- a/frame/support/test/tests/decl_storage_ui/config_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/config_duplicate.rs @@ -15,14 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub trait Trait: frame_support_test::Trait {} +pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage!{ - trait Store for Module as FinalKeysNone { + trait Store for Module as FinalKeysNone { pub Value config(value): u32; pub Value2 config(value): u32; } diff --git a/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs b/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs index e77dcea404ccb2139208dcc37a5574a0907fcf78..60bfa7f89c36faf183b333e8ad5215eca5376ead 100644 --- a/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs @@ -15,14 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
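// The change in these UI-test fixtures is purely mechanical: the pallet configuration
// trait `Trait` is renamed to `Config` (and bounds such as `frame_support_test::Trait`
// become `frame_support_test::Config`), while the `decl_module!` and `decl_storage!`
// bodies themselves stay the same.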
-pub trait Trait: frame_support_test::Trait {} +pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage!{ - trait Store for Module as FinalKeysNone { + trait Store for Module as FinalKeysNone { pub Value get(fn value) config(): u32; pub Value2 config(value): u32; } diff --git a/frame/support/test/tests/decl_storage_ui/get_duplicate.rs b/frame/support/test/tests/decl_storage_ui/get_duplicate.rs index b6ccb7ebb7b7bcb42b3328ca022cbf6954b27b24..921dfa6b774ddc7a19dc6fdaf1aa87888715c019 100644 --- a/frame/support/test/tests/decl_storage_ui/get_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/get_duplicate.rs @@ -15,14 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub trait Trait: frame_support_test::Trait {} +pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage!{ - trait Store for Module as FinalKeysNone { + trait Store for Module as FinalKeysNone { pub Value get(fn value) config(): u32; pub Value2 get(fn value) config(): u32; } diff --git a/frame/support/test/tests/derive_no_bound.rs b/frame/support/test/tests/derive_no_bound.rs index 29f813c6498bb87d8db992557224c7e2b5635675..48f2f3ec3f6befb7b7cc3148748f41690d7ade6d 100644 --- a/frame/support/test/tests/derive_no_bound.rs +++ b/frame/support/test/tests/derive_no_bound.rs @@ -28,19 +28,19 @@ fn runtime_debug_no_bound_display_correctly() { assert_eq!(format!("{:?}", Unnamed(1)), "Unnamed(1)"); } -trait Trait { +trait Config { type C: std::fmt::Debug + Clone + Eq + PartialEq; } struct Runtime; struct ImplNone; -impl Trait for Runtime { +impl Config for Runtime { type C = u32; } #[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound)] -struct StructNamed { +struct StructNamed { a: u32, b: u64, c: T::C, @@ -77,7 +77,7 @@ fn test_struct_named() { } #[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound)] -struct StructUnnamed(u32, u64, T::C, core::marker::PhantomData<(U, V)>); +struct StructUnnamed(u32, u64, T::C, core::marker::PhantomData<(U, V)>); #[test] fn test_struct_unnamed() { @@ -109,7 +109,7 @@ fn test_struct_unnamed() { } #[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound)] -enum Enum { +enum Enum { VariantUnnamed(u32, u64, T::C, core::marker::PhantomData<(U, V)>), VariantNamed { a: u32, diff --git a/frame/support/test/tests/derive_no_bound_ui.rs b/frame/support/test/tests/derive_no_bound_ui.rs index da276018f7f8eb6a5c6ad69fc0e930a30b663dc5..ba8fff1f3a5c570ca3a2f009ca10e18d609830ff 100644 --- a/frame/support/test/tests/derive_no_bound_ui.rs +++ b/frame/support/test/tests/derive_no_bound_ui.rs @@ -19,7 +19,7 @@ #[test] fn derive_no_bound_ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
- std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/derive_no_bound_ui/*.rs"); diff --git a/frame/support/test/tests/derive_no_bound_ui/clone.rs b/frame/support/test/tests/derive_no_bound_ui/clone.rs index 6b80dcedc38807d96b37abdffb39c828b74638f1..2bc1cc492d171f695fa22334c3d53e1c4f30cae6 100644 --- a/frame/support/test/tests/derive_no_bound_ui/clone.rs +++ b/frame/support/test/tests/derive_no_bound_ui/clone.rs @@ -1,9 +1,9 @@ -trait Trait { +trait Config { type C; } #[derive(frame_support::CloneNoBound)] -struct Foo { +struct Foo { c: T::C, } diff --git a/frame/support/test/tests/derive_no_bound_ui/clone.stderr b/frame/support/test/tests/derive_no_bound_ui/clone.stderr index 4b9cccf0b0fa1bd40e834ee515d1028a01022cde..af322f386aec4ca4ef5d1c5242ea787e6d3679d2 100644 --- a/frame/support/test/tests/derive_no_bound_ui/clone.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/clone.stderr @@ -1,7 +1,7 @@ -error[E0277]: the trait bound `::C: std::clone::Clone` is not satisfied +error[E0277]: the trait bound `::C: std::clone::Clone` is not satisfied --> $DIR/clone.rs:7:2 | 7 | c: T::C, - | ^ the trait `std::clone::Clone` is not implemented for `::C` + | ^ the trait `std::clone::Clone` is not implemented for `::C` | = note: required by `std::clone::Clone::clone` diff --git a/frame/support/test/tests/derive_no_bound_ui/debug.rs b/frame/support/test/tests/derive_no_bound_ui/debug.rs index f2411da4b41bc86c54d0443279212e5816c8acfb..6016c3e6d98b8ca71eadc645382fd997071ec22d 100644 --- a/frame/support/test/tests/derive_no_bound_ui/debug.rs +++ b/frame/support/test/tests/derive_no_bound_ui/debug.rs @@ -1,9 +1,9 @@ -trait Trait { +trait Config { type C; } #[derive(frame_support::DebugNoBound)] -struct Foo { +struct Foo { c: T::C, } diff --git a/frame/support/test/tests/derive_no_bound_ui/debug.stderr b/frame/support/test/tests/derive_no_bound_ui/debug.stderr index 838bd7f68a65f52e4b4f0ac17cf4152ec04d4a5a..7580cab2ea0b391754a8cd66b2ac96971645d5d2 100644 --- a/frame/support/test/tests/derive_no_bound_ui/debug.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/debug.stderr @@ -1,8 +1,8 @@ -error[E0277]: `::C` doesn't implement `std::fmt::Debug` +error[E0277]: `::C` doesn't implement `std::fmt::Debug` --> $DIR/debug.rs:7:2 | 7 | c: T::C, - | ^ `::C` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` + | ^ `::C` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | - = help: the trait `std::fmt::Debug` is not implemented for `::C` + = help: the trait `std::fmt::Debug` is not implemented for `::C` = note: required for the cast to the object type `dyn std::fmt::Debug` diff --git a/frame/support/test/tests/derive_no_bound_ui/eq.rs b/frame/support/test/tests/derive_no_bound_ui/eq.rs index 9e4026734fbeb72a93f6539d7ab8a53af8a0cf8b..a48452626368c08dd9d0df1265a344caccce86e7 100644 --- a/frame/support/test/tests/derive_no_bound_ui/eq.rs +++ b/frame/support/test/tests/derive_no_bound_ui/eq.rs @@ -1,9 +1,9 @@ -trait Trait { +trait Config { type C; } #[derive(frame_support::EqNoBound)] -struct Foo { +struct Foo { c: T::C, } diff --git a/frame/support/test/tests/derive_no_bound_ui/eq.stderr b/frame/support/test/tests/derive_no_bound_ui/eq.stderr index 08341c4d65ab5e959a1fc5a65ec56466284fdf24..bd5df600dc428e20783769fbd15038278e189b07 100644 --- a/frame/support/test/tests/derive_no_bound_ui/eq.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/eq.stderr 
@@ -1,7 +1,7 @@ error[E0277]: can't compare `Foo` with `Foo` --> $DIR/eq.rs:6:8 | -6 | struct Foo { +6 | struct Foo { | ^^^ no implementation for `Foo == Foo` | = help: the trait `std::cmp::PartialEq` is not implemented for `Foo` diff --git a/frame/support/test/tests/derive_no_bound_ui/partial_eq.rs b/frame/support/test/tests/derive_no_bound_ui/partial_eq.rs index 1720776a40029bd1011a75ec8dccaf60ddfd30af..7bd6b7ef6a2e3c5fec0ee6488356c66a76b5593d 100644 --- a/frame/support/test/tests/derive_no_bound_ui/partial_eq.rs +++ b/frame/support/test/tests/derive_no_bound_ui/partial_eq.rs @@ -1,9 +1,9 @@ -trait Trait { +trait Config { type C; } #[derive(frame_support::PartialEqNoBound)] -struct Foo { +struct Foo { c: T::C, } diff --git a/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr b/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr index d85757c520aa117ad86e34c99920d2219f722c87..64f844e547be0a70e5bd91feaf0b7ff5b84dafe2 100644 --- a/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr @@ -1,7 +1,7 @@ -error[E0369]: binary operation `==` cannot be applied to type `::C` +error[E0369]: binary operation `==` cannot be applied to type `::C` --> $DIR/partial_eq.rs:7:2 | 7 | c: T::C, | ^ | - = note: the trait `std::cmp::PartialEq` is not implemented for `::C` + = note: the trait `std::cmp::PartialEq` is not implemented for `::C` diff --git a/frame/support/test/tests/final_keys.rs b/frame/support/test/tests/final_keys.rs index 6bd1252825466e42171f104e66c3fa49106b4b76..e7c95c6b432a4f1a52c3f0ea36ae3e5084ddfe56 100644 --- a/frame/support/test/tests/final_keys.rs +++ b/frame/support/test/tests/final_keys.rs @@ -21,14 +21,14 @@ use frame_support::{StorageDoubleMap, StorageMap, StorageValue, StoragePrefixedM use sp_io::{TestExternalities, hashing::{twox_64, twox_128, blake2_128}}; mod no_instance { - pub trait Trait: frame_support_test::Trait {} + pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage!{ - trait Store for Module as FinalKeysNone { + trait Store for Module as FinalKeysNone { pub Value config(value): u32; pub Map: map hasher(blake2_128_concat) u32 => u32; @@ -45,15 +45,15 @@ mod no_instance { } mod instance { - pub trait Trait: frame_support_test::Trait {} + pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module, I: Instance = DefaultInstance> + pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage!{ - trait Store for Module, I: Instance = DefaultInstance> + trait Store for Module, I: Instance = DefaultInstance> as FinalKeysSome { pub Value config(value): u32; diff --git a/frame/support/test/tests/genesisconfig.rs b/frame/support/test/tests/genesisconfig.rs index f268f11a4dc15fc5113e87fc74ca1ab7fe379d95..4a875bb68890979ec1cdf0e5b935c283df7220cb 100644 --- a/frame/support/test/tests/genesisconfig.rs +++ b/frame/support/test/tests/genesisconfig.rs @@ -15,28 +15,28 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub trait Trait: frame_support_test::Trait {} +pub trait Config: frame_support_test::Config {} frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage! { - trait Store for Module as Test { + trait Store for Module as Test { pub AppendableDM config(t): double_map hasher(identity) u32, hasher(identity) T::BlockNumber => Vec; } } struct Test; -impl frame_support_test::Trait for Test { +impl frame_support_test::Config for Test { type BlockNumber = u32; type Origin = (); type PalletInfo = (); type DbWeight = (); } -impl Trait for Test {} +impl Config for Test {} #[test] fn init_genesis_config() { diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 6c90767f92e579e14806939976e0b19ebd4c0a3a..b5bb6dd671b97de13e6f3666794a88200eef6117 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -41,16 +41,16 @@ mod module1 { use super::*; use sp_std::ops::Add; - pub trait Trait: system::Trait where ::BlockNumber: From { - type Event: From> + Into<::Event>; + pub trait Config: system::Config where ::BlockNumber: From { + type Event: From> + Into<::Event>; type Origin: From>; type SomeParameter: Get; type GenericType: Default + Clone + Codec + EncodeLike; } frame_support::decl_module! { - pub struct Module, I: Instance> for enum Call where - origin: ::Origin, + pub struct Module, I: Instance> for enum Call where + origin: ::Origin, system = system, T::BlockNumber: From { @@ -67,7 +67,7 @@ mod module1 { } frame_support::decl_storage! { - trait Store for Module, I: Instance> as Module1 where + trait Store for Module, I: Instance> as Module1 where T::BlockNumber: From + std::fmt::Display { pub Value config(value): T::GenericType; @@ -83,7 +83,7 @@ mod module1 { } frame_support::decl_error! { - pub enum Error for Module, I: Instance> where + pub enum Error for Module, I: Instance> where T::BlockNumber: From, T::BlockNumber: Add, T::AccountId: AsRef<[u8]>, @@ -101,14 +101,14 @@ mod module1 { } #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug, Encode, Decode)] - pub enum Origin, I> where T::BlockNumber: From { + pub enum Origin, I> where T::BlockNumber: From { Members(u32), _Phantom(std::marker::PhantomData<(T, I)>), } pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678"; - impl, I: Instance> ProvideInherent for Module where + impl, I: Instance> ProvideInherent for Module where T::BlockNumber: From { type Call = Call; @@ -131,17 +131,17 @@ mod module1 { mod module2 { use super::*; - pub trait Trait: system::Trait { + pub trait Config: system::Config { type Amount: Parameter + Default; - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; type Origin: From>; } - impl, I: Instance> Currency for Module {} + impl, I: Instance> Currency for Module {} frame_support::decl_module! { - pub struct Module, I: Instance=DefaultInstance> for enum Call where - origin: ::Origin, + pub struct Module, I: Instance=DefaultInstance> for enum Call where + origin: ::Origin, system = system { fn deposit_event() = default; @@ -149,7 +149,7 @@ mod module2 { } frame_support::decl_storage! 
{ - trait Store for Module, I: Instance=DefaultInstance> as Module2 { + trait Store for Module, I: Instance=DefaultInstance> as Module2 { pub Value config(value): T::Amount; pub Map config(map): map hasher(identity) u64 => u64; pub DoubleMap config(double_map): double_map hasher(identity) u64, hasher(identity) u64 => u64; @@ -157,20 +157,20 @@ mod module2 { } frame_support::decl_event! { - pub enum Event where Amount = >::Amount { + pub enum Event where Amount = >::Amount { Variant(Amount), } } #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug, Encode, Decode)] - pub enum Origin, I=DefaultInstance> { + pub enum Origin, I=DefaultInstance> { Members(u32), _Phantom(std::marker::PhantomData<(T, I)>), } pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678"; - impl, I: Instance> ProvideInherent for Module { + impl, I: Instance> ProvideInherent for Module { type Call = Call; type Error = MakeFatalError; const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; @@ -190,13 +190,13 @@ mod module2 { mod module3 { use super::*; - pub trait Trait: module2::Trait + module2::Trait + system::Trait { + pub trait Config: module2::Config + module2::Config + system::Config { type Currency: Currency; type Currency2: Currency; } frame_support::decl_module! { - pub struct Module for enum Call where origin: ::Origin, system=system {} + pub struct Module for enum Call where origin: ::Origin, system=system {} } } @@ -204,39 +204,39 @@ parameter_types! { pub const SomeValue: u32 = 100; } -impl module1::Trait for Runtime { +impl module1::Config for Runtime { type Event = Event; type Origin = Origin; type SomeParameter = SomeValue; type GenericType = u32; } -impl module1::Trait for Runtime { +impl module1::Config for Runtime { type Event = Event; type Origin = Origin; type SomeParameter = SomeValue; type GenericType = u32; } -impl module2::Trait for Runtime { +impl module2::Config for Runtime { type Amount = u16; type Event = Event; type Origin = Origin; } -impl module2::Trait for Runtime { +impl module2::Config for Runtime { type Amount = u32; type Event = Event; type Origin = Origin; } -impl module2::Trait for Runtime { +impl module2::Config for Runtime { type Amount = u32; type Event = Event; type Origin = Origin; } -impl module2::Trait for Runtime { +impl module2::Config for Runtime { type Amount = u64; type Event = Event; type Origin = Origin; } -impl module3::Trait for Runtime { +impl module3::Config for Runtime { type Currency = Module2_2; type Currency2 = Module2_3; } @@ -246,7 +246,7 @@ pub type AccountId = ::Signer; pub type BlockNumber = u64; pub type Index = u64; -impl system::Trait for Runtime { +impl system::Config for Runtime { type BaseCallFilter= (); type Hash = H256; type Origin = Origin; diff --git a/frame/support/test/tests/issue2219.rs b/frame/support/test/tests/issue2219.rs index 596a3b6ffb25dd46d01f8f855756bbd4647dd728..70a84dfee59de9c90cf3e6deab580dfc9829cfad 100644 --- a/frame/support/test/tests/issue2219.rs +++ b/frame/support/test/tests/issue2219.rs @@ -27,9 +27,9 @@ mod module { use super::*; pub type Request = ( - ::AccountId, + ::AccountId, Role, - ::BlockNumber, + ::BlockNumber, ); pub type Requests = Vec>; @@ -39,7 +39,7 @@ mod module { } #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Debug)] - pub struct RoleParameters { + pub struct RoleParameters { // minimum actors to maintain - if role is unstaking // and remaining actors would be less that this value - prevent or punish for unstaking pub min_actors: u32, @@ -65,7 +65,7 @@ mod module { pub 
startup_grace_period: T::BlockNumber, } - impl Default for RoleParameters { + impl Default for RoleParameters { fn default() -> Self { Self { max_actors: 10, @@ -81,18 +81,18 @@ mod module { } } - pub trait Trait: system::Trait {} + pub trait Config: system::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=system {} + pub struct Module for enum Call where origin: T::Origin, system=system {} } #[derive(Encode, Decode, Copy, Clone, Serialize, Deserialize)] - pub struct Data { + pub struct Data { pub data: T::BlockNumber, } - impl Default for Data { + impl Default for Data { fn default() -> Self { Self { data: T::BlockNumber::default(), @@ -101,7 +101,7 @@ mod module { } frame_support::decl_storage! { - trait Store for Module as Actors { + trait Store for Module as Actors { /// requirements to enter and maintain status in roles pub Parameters get(fn parameters) build(|config: &GenesisConfig| { if config.enable_storage_role { @@ -157,7 +157,7 @@ pub type Header = generic::Header; pub type Block = generic::Block; pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; -impl system::Trait for Runtime { +impl system::Config for Runtime { type BaseCallFilter = (); type Hash = H256; type Origin = Origin; @@ -169,7 +169,7 @@ impl system::Trait for Runtime { type DbWeight = (); } -impl module::Trait for Runtime {} +impl module::Config for Runtime {} frame_support::construct_runtime!( pub enum Runtime where diff --git a/frame/support/test/tests/pallet_version.rs b/frame/support/test/tests/pallet_version.rs index d6293ac6a308aa087b154cac2a171bff405b9a97..00750c6767216847a89e522d217abe635efbbdb4 100644 --- a/frame/support/test/tests/pallet_version.rs +++ b/frame/support/test/tests/pallet_version.rs @@ -37,11 +37,11 @@ const SOME_TEST_VERSION: PalletVersion = PalletVersion { major: 3000, minor: 30, mod module1 { use super::*; - pub trait Trait: system::Trait {} + pub trait Config: system::Config {} frame_support::decl_module! { - pub struct Module for enum Call where - origin: ::Origin, + pub struct Module for enum Call where + origin: ::Origin, system = system, {} } @@ -52,11 +52,11 @@ mod module1 { mod module2 { use super::*; - pub trait Trait: system::Trait {} + pub trait Config: system::Config {} frame_support::decl_module! { - pub struct Module, I: Instance=DefaultInstance> for enum Call where - origin: ::Origin, + pub struct Module, I: Instance=DefaultInstance> for enum Call where + origin: ::Origin, system = system { fn on_runtime_upgrade() -> Weight { @@ -78,21 +78,21 @@ mod module2 { } frame_support::decl_storage! 
{ - trait Store for Module, I: Instance=DefaultInstance> as Module2 {} + trait Store for Module, I: Instance=DefaultInstance> as Module2 {} } } -impl module1::Trait for Runtime {} -impl module2::Trait for Runtime {} -impl module2::Trait for Runtime {} -impl module2::Trait for Runtime {} +impl module1::Config for Runtime {} +impl module2::Config for Runtime {} +impl module2::Config for Runtime {} +impl module2::Config for Runtime {} pub type Signature = sr25519::Signature; pub type AccountId = ::Signer; pub type BlockNumber = u64; pub type Index = u64; -impl system::Trait for Runtime { +impl system::Config for Runtime { type BaseCallFilter= (); type Hash = H256; type Origin = Origin; diff --git a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs new file mode 100644 index 0000000000000000000000000000000000000000..01b965f3b514702b31a4f51537cea54f72fd398e --- /dev/null +++ b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs @@ -0,0 +1,150 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub trait Trait: frame_system::Config { + type Balance: frame_support::dispatch::Parameter; + /// The overarching event type. + type Event: From> + Into<::Event>; +} + +frame_support::decl_storage! { + trait Store for Module as Example { + Dummy get(fn dummy) config(): Option; + } +} + +frame_support::decl_event!( + pub enum Event where B = ::Balance { + Dummy(B), + } +); + +frame_support::decl_error!( + pub enum Error for Module { + Dummy, + } +); + +frame_support::decl_module! 
{ + pub struct Module for enum Call where origin: T::Origin { + fn deposit_event() = default; + type Error = Error; + const Foo: u32 = u32::max_value(); + + #[weight = 0] + fn accumulate_dummy(origin, increase_by: T::Balance) { + unimplemented!(); + } + + fn on_initialize(_n: T::BlockNumber) -> frame_support::weights::Weight { + 0 + } + } +} + +impl sp_runtime::traits::ValidateUnsigned for Module { + type Call = Call; + + fn validate_unsigned( + _source: sp_runtime::transaction_validity::TransactionSource, + _call: &Self::Call, + ) -> sp_runtime::transaction_validity::TransactionValidity { + unimplemented!(); + } +} + +pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"12345678"; + +impl sp_inherents::ProvideInherent for Module { + type Call = Call; + type Error = sp_inherents::MakeFatalError; + const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(_data: &sp_inherents::InherentData) -> Option { + unimplemented!(); + } + + fn check_inherent(_: &Self::Call, _: &sp_inherents::InherentData) -> std::result::Result<(), Self::Error> { + unimplemented!(); + } +} + +#[cfg(test)] +mod tests { + use crate as pallet_test; + + use frame_support::parameter_types; + use sp_runtime::traits::Block; + + type SignedExtra = ( + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + ); + type TestBlock = sp_runtime::generic::Block; + type TestHeader = sp_runtime::generic::Header; + type TestUncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< + ::AccountId, + ::Call, + (), + SignedExtra, + >; + + frame_support::construct_runtime!( + pub enum Runtime where + Block = TestBlock, + NodeBlock = TestBlock, + UncheckedExtrinsic = TestUncheckedExtrinsic + { + System: frame_system::{Module, Call, Config, Storage, Event}, + PalletTest: pallet_test::{Module, Call, Storage, Event, Config, ValidateUnsigned, Inherent}, + } + ); + + parameter_types! { + pub const BlockHashCount: u64 = 250; + } + + impl frame_system::Config for Runtime { + type BaseCallFilter = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = sp_core::H256; + type Call = Call; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = TestHeader; + type Event = (); + type BlockHashCount = BlockHashCount; + type DbWeight = (); + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + } + + impl pallet_test::Trait for Runtime { + type Balance = u32; + type Event = (); + } +} diff --git a/frame/support/test/tests/reserved_keyword.rs b/frame/support/test/tests/reserved_keyword.rs index 382b2e498741fd6740dc7b1c94537706ce13ffac..8136d11824ace94ef484e02fa059193dd254ad03 100644 --- a/frame/support/test/tests/reserved_keyword.rs +++ b/frame/support/test/tests/reserved_keyword.rs @@ -19,7 +19,7 @@ #[test] fn reserved_keyword() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
- std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/reserved_keyword/*.rs"); diff --git a/frame/support/test/tests/reserved_keyword/on_initialize.rs b/frame/support/test/tests/reserved_keyword/on_initialize.rs index 781b72bd04e8c3ab4358010f3c1b16305d61d430..72d53abfb1034f6cd0d6bcac61c18837674ea49c 100644 --- a/frame/support/test/tests/reserved_keyword/on_initialize.rs +++ b/frame/support/test/tests/reserved_keyword/on_initialize.rs @@ -4,7 +4,7 @@ macro_rules! reserved { mod $reserved { pub use frame_support::dispatch; - pub trait Trait: frame_support_test::Trait {} + pub trait Config: frame_support_test::Config {} pub mod system { use frame_support::dispatch; @@ -15,7 +15,7 @@ macro_rules! reserved { } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test { + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test { #[weight = 0] fn $reserved(_origin) -> dispatch::DispatchResult { unreachable!() } } diff --git a/frame/support/test/tests/storage_transaction.rs b/frame/support/test/tests/storage_transaction.rs index 5c687ef05005d6b6c7492840dd3b211d89bf3954..93b531a678d91875a398d4eea3fde0dfe4c6e842 100644 --- a/frame/support/test/tests/storage_transaction.rs +++ b/frame/support/test/tests/storage_transaction.rs @@ -22,10 +22,10 @@ use frame_support::{ use sp_io::TestExternalities; use sp_std::result; -pub trait Trait: frame_support_test::Trait {} +pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test { + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test { #[weight = 0] #[transactional] fn value_commits(_origin, v: u32) { @@ -42,7 +42,7 @@ frame_support::decl_module! { } frame_support::decl_storage!{ - trait Store for Module as StorageTransactions { + trait Store for Module as StorageTransactions { pub Value: u32; pub Map: map hasher(twox_64_concat) String => u32; } @@ -50,14 +50,14 @@ frame_support::decl_storage!{ struct Runtime; -impl frame_support_test::Trait for Runtime { +impl frame_support_test::Config for Runtime { type Origin = u32; type BlockNumber = u32; type PalletInfo = (); type DbWeight = (); } -impl Trait for Runtime {} +impl Config for Runtime {} #[test] fn storage_transaction_basic_commit() { diff --git a/frame/support/test/tests/system.rs b/frame/support/test/tests/system.rs index f30b6e4c2af9d1c0989a445a5695042300563473..2021aa43f518d03ae1e23b61d5c1a2e03590e634 100644 --- a/frame/support/test/tests/system.rs +++ b/frame/support/test/tests/system.rs @@ -19,7 +19,7 @@ use frame_support::{ codec::{Encode, Decode, EncodeLike}, traits::Get, weights::RuntimeDbWeight, }; -pub trait Trait: 'static + Eq + Clone { +pub trait Config: 'static + Eq + Clone { type Origin: Into, Self::Origin>> + From>; @@ -34,18 +34,18 @@ pub trait Trait: 'static + Eq + Clone { } frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { #[weight = 0] fn noop(origin) {} } } -impl Module { +impl Module { pub fn deposit_event(_event: impl Into) {} } frame_support::decl_event!( - pub enum Event where BlockNumber = ::BlockNumber { + pub enum Event where BlockNumber = ::BlockNumber { ExtrinsicSuccess, ExtrinsicFailed, Ignore(BlockNumber), @@ -53,7 +53,7 @@ frame_support::decl_event!( ); frame_support::decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Test error documentation TestError, /// Error documentation @@ -79,7 +79,7 @@ impl From> for RawOrigin { } } -pub type Origin = RawOrigin<::AccountId>; +pub type Origin = RawOrigin<::AccountId>; #[allow(dead_code)] pub fn ensure_root(o: OuterOrigin) -> Result<(), &'static str> diff --git a/frame/system/README.md b/frame/system/README.md index adfa7aa35ddda59e18ef32a96c8a114f1c609c7a..106a16bc209d62dd01cebd64664c36adc1580a1f 100644 --- a/frame/system/README.md +++ b/frame/system/README.md @@ -57,10 +57,10 @@ Import the System module and derive your module's configuration trait from the s use frame_support::{decl_module, dispatch}; use frame_system::{self as system, ensure_signed}; -pub trait Trait: system::Trait {} +pub trait Config: system::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn system_module_example(origin) -> dispatch::DispatchResult { let _sender = ensure_signed(origin)?; @@ -72,4 +72,4 @@ decl_module! { } ``` -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 00c965136c0d0f15c802a6d37668e79ee2eea850..490931748863d9f785f392007402e339329bebb9 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -24,12 +24,12 @@ use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header mod module { use super::*; - pub trait Trait: system::Trait { - type Event: From + Into<::Event>; + pub trait Config: system::Config { + type Event: From + Into<::Event>; } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { pub fn deposit_event() = default; } } @@ -54,14 +54,22 @@ impl_outer_event! { frame_support::parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 4 * 1024 * 1024; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::with_sensible_defaults( + 4 * 1024 * 1024, Perbill::from_percent(75), + ); + pub BlockLength: frame_system::limits::BlockLength = + frame_system::limits::BlockLength::max_with_normal_ratio( + 4 * 1024 * 1024, Perbill::from_percent(75), + ); } #[derive(Clone, Eq, PartialEq)] pub struct Runtime; -impl system::Trait for Runtime { +impl system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = BlockLength; + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -73,13 +81,6 @@ impl system::Trait for Runtime { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = (); @@ -88,7 +89,7 @@ impl system::Trait for Runtime { type SystemWeightInfo = (); } -impl module::Trait for Runtime { +impl module::Config for Runtime { type Event = Event; } diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs index b631d00e47c5049ebcea04478aca5a0cc666e206..080b1cd80f29721516a2ff38756335402701a172 100644 --- a/frame/system/benchmarking/src/lib.rs +++ b/frame/system/benchmarking/src/lib.rs @@ -25,20 +25,23 @@ use sp_std::prelude::*; use sp_core::{ChangesTrieConfiguration, storage::well_known_keys}; use sp_runtime::traits::Hash; use frame_benchmarking::{benchmarks, whitelisted_caller}; -use frame_support::traits::Get; -use frame_support::storage::{self, StorageMap}; +use frame_support::{ + storage::{self, StorageMap}, + traits::Get, + weights::DispatchClass, +}; use frame_system::{Module as System, Call, RawOrigin, DigestItemOf, AccountInfo}; mod mock; -pub struct Module(System); -pub trait Trait: frame_system::Trait {} +pub struct Module(System); +pub trait Config: frame_system::Config {} benchmarks! { _ { } remark { - let b in 0 .. T::MaximumBlockLength::get(); + let b in 0 .. 
*T::BlockLength::get().max.get(DispatchClass::Normal) as u32; let remark_message = vec![1; b as usize]; let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), remark_message) diff --git a/frame/system/benchmarking/src/mock.rs b/frame/system/benchmarking/src/mock.rs index 33255d7b50e19470af07808b47d976d536c8799e..8cfd70b2f0950ad1a39557658d4f53a9a4e85456 100644 --- a/frame/system/benchmarking/src/mock.rs +++ b/frame/system/benchmarking/src/mock.rs @@ -38,7 +38,7 @@ pub struct Call; impl Dispatchable for Call { type Origin = (); - type Trait = (); + type Config = (); type Info = DispatchInfo; type PostInfo = PostDispatchInfo; fn dispatch(self, _origin: Self::Origin) @@ -50,8 +50,11 @@ impl Dispatchable for Call { #[derive(Clone, Eq, PartialEq, Debug)] pub struct Test; -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; @@ -63,13 +66,6 @@ impl frame_system::Trait for Test { type Header = sp_runtime::testing::Header; type Event = (); type BlockHashCount = (); - type MaximumBlockWeight = (); - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = (); - type AvailableBlockRatio = (); - type MaximumBlockLength = (); type Version = (); type PalletInfo = (); type AccountData = (); @@ -78,7 +74,7 @@ impl frame_system::Trait for Test { type SystemWeightInfo = (); } -impl crate::Trait for Test {} +impl crate::Config for Test {} pub fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); diff --git a/frame/system/src/extensions/check_genesis.rs b/frame/system/src/extensions/check_genesis.rs index d0a346519ca23b8661cd92285ec2d72e5753b7a9..f60437887b1d97e2e330cd0b3cbf62c94ad22e4d 100644 --- a/frame/system/src/extensions/check_genesis.rs +++ b/frame/system/src/extensions/check_genesis.rs @@ -16,7 +16,7 @@ // limitations under the License. use codec::{Encode, Decode}; -use crate::{Trait, Module}; +use crate::{Config, Module}; use sp_runtime::{ traits::{SignedExtension, Zero}, transaction_validity::TransactionValidityError, @@ -24,9 +24,9 @@ use sp_runtime::{ /// Genesis hash check to provide replay protection between different networks. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckGenesis(sp_std::marker::PhantomData); +pub struct CheckGenesis(sp_std::marker::PhantomData); -impl sp_std::fmt::Debug for CheckGenesis { +impl sp_std::fmt::Debug for CheckGenesis { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckGenesis") @@ -38,16 +38,16 @@ impl sp_std::fmt::Debug for CheckGenesis { } } -impl CheckGenesis { +impl CheckGenesis { /// Creates new `SignedExtension` to check genesis hash. 
pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckGenesis { +impl SignedExtension for CheckGenesis { type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::Call; type AdditionalSigned = T::Hash; type Pre = (); const IDENTIFIER: &'static str = "CheckGenesis"; diff --git a/frame/system/src/extensions/check_mortality.rs b/frame/system/src/extensions/check_mortality.rs index 7e3f65d0324d705fd5f6b1ce460b249917907ff4..fbc37f527d81a8387af81b5e37481aa853a3d2d8 100644 --- a/frame/system/src/extensions/check_mortality.rs +++ b/frame/system/src/extensions/check_mortality.rs @@ -16,7 +16,7 @@ // limitations under the License. use codec::{Encode, Decode}; -use crate::{Trait, Module, BlockHash}; +use crate::{Config, Module, BlockHash}; use frame_support::StorageMap; use sp_runtime::{ generic::Era, @@ -28,16 +28,16 @@ use sp_runtime::{ /// Check for transaction mortality. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckMortality(Era, sp_std::marker::PhantomData); +pub struct CheckMortality(Era, sp_std::marker::PhantomData); -impl CheckMortality { +impl CheckMortality { /// utility constructor. Used only in client/factory code. pub fn from(era: Era) -> Self { Self(era, sp_std::marker::PhantomData) } } -impl sp_std::fmt::Debug for CheckMortality { +impl sp_std::fmt::Debug for CheckMortality { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckMortality({:?})", self.0) @@ -49,7 +49,7 @@ impl sp_std::fmt::Debug for CheckMortality { } } -impl SignedExtension for CheckMortality { +impl SignedExtension for CheckMortality { type AccountId = T::AccountId; type Call = T::Call; type AdditionalSigned = T::Hash; diff --git a/frame/system/src/extensions/check_nonce.rs b/frame/system/src/extensions/check_nonce.rs index e7316457aaffcb1360896c840e45b3eeb198e713..a1a310833cd3c098a8fb9bd71723f9d88ff90ee1 100644 --- a/frame/system/src/extensions/check_nonce.rs +++ b/frame/system/src/extensions/check_nonce.rs @@ -16,7 +16,7 @@ // limitations under the License. use codec::{Encode, Decode}; -use crate::Trait; +use crate::Config; use frame_support::{ weights::DispatchInfo, StorageMap, @@ -35,16 +35,16 @@ use sp_std::vec; /// Note that this does not set any priority by default. Make sure that AT LEAST one of the signed /// extension sets some kind of priority upon validating transactions. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckNonce(#[codec(compact)] T::Index); +pub struct CheckNonce(#[codec(compact)] T::Index); -impl CheckNonce { +impl CheckNonce { /// utility constructor. Used only in client/factory code. 
pub fn from(nonce: T::Index) -> Self { Self(nonce) } } -impl sp_std::fmt::Debug for CheckNonce { +impl sp_std::fmt::Debug for CheckNonce { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckNonce({})", self.0) @@ -56,7 +56,7 @@ impl sp_std::fmt::Debug for CheckNonce { } } -impl SignedExtension for CheckNonce where +impl SignedExtension for CheckNonce where T::Call: Dispatchable { type AccountId = T::AccountId; diff --git a/frame/system/src/extensions/check_spec_version.rs b/frame/system/src/extensions/check_spec_version.rs index 8dc4d8d9ceddc52893795cda924366bdec44975f..f4838ab354725dfb519a115865e74991e0e2965b 100644 --- a/frame/system/src/extensions/check_spec_version.rs +++ b/frame/system/src/extensions/check_spec_version.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{Trait, Module}; +use crate::{Config, Module}; use codec::{Encode, Decode}; use sp_runtime::{ traits::SignedExtension, @@ -24,9 +24,9 @@ use sp_runtime::{ /// Ensure the runtime version registered in the transaction is the same as at present. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckSpecVersion(sp_std::marker::PhantomData); +pub struct CheckSpecVersion(sp_std::marker::PhantomData); -impl sp_std::fmt::Debug for CheckSpecVersion { +impl sp_std::fmt::Debug for CheckSpecVersion { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckSpecVersion") @@ -38,16 +38,16 @@ impl sp_std::fmt::Debug for CheckSpecVersion { } } -impl CheckSpecVersion { +impl CheckSpecVersion { /// Create new `SignedExtension` to check runtime version. pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckSpecVersion { +impl SignedExtension for CheckSpecVersion { type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::Call; type AdditionalSigned = u32; type Pre = (); const IDENTIFIER: &'static str = "CheckSpecVersion"; diff --git a/frame/system/src/extensions/check_tx_version.rs b/frame/system/src/extensions/check_tx_version.rs index ee6f3349365b9b9b21801eabd91ef32863a45007..5a1c8cc738610308765d6e5c80d8a7803f837bdb 100644 --- a/frame/system/src/extensions/check_tx_version.rs +++ b/frame/system/src/extensions/check_tx_version.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{Trait, Module}; +use crate::{Config, Module}; use codec::{Encode, Decode}; use sp_runtime::{ traits::SignedExtension, @@ -24,9 +24,9 @@ use sp_runtime::{ /// Ensure the transaction version registered in the transaction is the same as at present. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckTxVersion(sp_std::marker::PhantomData); +pub struct CheckTxVersion(sp_std::marker::PhantomData); -impl sp_std::fmt::Debug for CheckTxVersion { +impl sp_std::fmt::Debug for CheckTxVersion { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckTxVersion") @@ -38,16 +38,16 @@ impl sp_std::fmt::Debug for CheckTxVersion { } } -impl CheckTxVersion { +impl CheckTxVersion { /// Create new `SignedExtension` to check transaction version. 
pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckTxVersion { +impl SignedExtension for CheckTxVersion { type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::Call; type AdditionalSigned = u32; type Pre = (); const IDENTIFIER: &'static str = "CheckTxVersion"; diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index 39439a3e2d8ce74f9d4124712cad47cb8b72a325..fc74b03a61cc1e3648592df1464d8838ddf31d97 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{Trait, Module}; +use crate::{limits::BlockWeights, Config, Module}; use codec::{Encode, Decode}; use sp_runtime::{ traits::{SignedExtension, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, Printable}, @@ -23,7 +23,7 @@ use sp_runtime::{ ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity, TransactionPriority, }, - Perbill, DispatchResult, + DispatchResult, }; use frame_support::{ traits::{Get}, @@ -33,55 +33,22 @@ use frame_support::{ /// Block resource (weight) limit check. #[derive(Encode, Decode, Clone, Eq, PartialEq, Default)] -pub struct CheckWeight(sp_std::marker::PhantomData); +pub struct CheckWeight(sp_std::marker::PhantomData); -impl CheckWeight where - T::Call: Dispatchable +impl CheckWeight where + T::Call: Dispatchable, { - /// Get the quota ratio of each dispatch class type. This indicates that all operational and mandatory - /// dispatches can use the full capacity of any resource, while user-triggered ones can consume - /// a portion. - fn get_dispatch_limit_ratio(class: DispatchClass) -> Perbill { - match class { - DispatchClass::Operational | DispatchClass::Mandatory - => ::one(), - DispatchClass::Normal => T::AvailableBlockRatio::get(), - } - } - - /// Checks if the current extrinsic does not exceed `MaximumExtrinsicWeight` limit. + /// Checks if the current extrinsic does not exceed the maximum weight a single extrinsic + /// with given `DispatchClass` can have. fn check_extrinsic_weight( info: &DispatchInfoOf, ) -> Result<(), TransactionValidityError> { - match info.class { - // Mandatory transactions are included in a block unconditionally, so - // we don't verify weight. - DispatchClass::Mandatory => Ok(()), - // Normal transactions must not exceed `MaximumExtrinsicWeight`. - DispatchClass::Normal => { - let maximum_weight = T::MaximumExtrinsicWeight::get(); - let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); - if extrinsic_weight > maximum_weight { - Err(InvalidTransaction::ExhaustsResources.into()) - } else { - Ok(()) - } - }, - // For operational transactions we make sure it doesn't exceed - // the space alloted for `Operational` class. 
- DispatchClass::Operational => { - let maximum_weight = T::MaximumBlockWeight::get(); - let operational_limit = - Self::get_dispatch_limit_ratio(DispatchClass::Operational) * maximum_weight; - let operational_limit = - operational_limit.saturating_sub(T::BlockExecutionWeight::get()); - let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); - if extrinsic_weight > operational_limit { - Err(InvalidTransaction::ExhaustsResources.into()) - } else { - Ok(()) - } + let max = T::BlockWeights::get().get(info.class).max_extrinsic; + match max { + Some(max) if info.weight > max => { + Err(InvalidTransaction::ExhaustsResources.into()) }, + _ => Ok(()), } } @@ -90,51 +57,10 @@ impl CheckWeight where /// Upon successes, it returns the new block weight as a `Result`. fn check_block_weight( info: &DispatchInfoOf, - ) -> Result { - let maximum_weight = T::MaximumBlockWeight::get(); - let mut all_weight = Module::::block_weight(); - match info.class { - // If we have a dispatch that must be included in the block, it ignores all the limits. - DispatchClass::Mandatory => { - let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); - all_weight.add(extrinsic_weight, DispatchClass::Mandatory); - Ok(all_weight) - }, - // If we have a normal dispatch, we follow all the normal rules and limits. - DispatchClass::Normal => { - let normal_limit = Self::get_dispatch_limit_ratio(DispatchClass::Normal) * maximum_weight; - let extrinsic_weight = info.weight.checked_add(T::ExtrinsicBaseWeight::get()) - .ok_or(InvalidTransaction::ExhaustsResources)?; - all_weight.checked_add(extrinsic_weight, DispatchClass::Normal) - .map_err(|_| InvalidTransaction::ExhaustsResources)?; - if all_weight.get(DispatchClass::Normal) > normal_limit { - Err(InvalidTransaction::ExhaustsResources.into()) - } else { - Ok(all_weight) - } - }, - // If we have an operational dispatch, allow it if we have not used our full - // "operational space" (independent of existing fullness). - DispatchClass::Operational => { - let operational_limit = Self::get_dispatch_limit_ratio(DispatchClass::Operational) * maximum_weight; - let normal_limit = Self::get_dispatch_limit_ratio(DispatchClass::Normal) * maximum_weight; - let operational_space = operational_limit.saturating_sub(normal_limit); - - let extrinsic_weight = info.weight.checked_add(T::ExtrinsicBaseWeight::get()) - .ok_or(InvalidTransaction::ExhaustsResources)?; - all_weight.checked_add(extrinsic_weight, DispatchClass::Operational) - .map_err(|_| InvalidTransaction::ExhaustsResources)?; - - // If it would fit in normally, its okay - if all_weight.total() <= maximum_weight || - // If we have not used our operational space - all_weight.get(DispatchClass::Operational) <= operational_space { - Ok(all_weight) - } else { - Err(InvalidTransaction::ExhaustsResources.into()) - } - } - } + ) -> Result { + let maximum_weight = T::BlockWeights::get(); + let all_weight = Module::::block_weight(); + calculate_consumed_weight::(maximum_weight, all_weight, info) } /// Checks if the current extrinsic can fit into the block with respect to block length limits. 
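// With the `limits::BlockWeights` model used above, each `DispatchClass` carries its own
// `base_extrinsic`, plus optional `max_extrinsic`, `max_total` and `reserved` allowances,
// and the whole block has `base_block`/`max_block`; the per-class ratios formerly derived
// from `AvailableBlockRatio` in `get_dispatch_limit_ratio` are now read straight from the
// runtime configuration. A runtime would typically build these limits along the lines of
// the following sketch (`RuntimeBlockWeights` is an illustrative name; `Perbill` comes
// from `sp_runtime`):
//
//     frame_support::parameter_types! {
//         pub RuntimeBlockWeights: frame_system::limits::BlockWeights =
//             frame_system::limits::BlockWeights::with_sensible_defaults(
//                 4 * 1024 * 1024, Perbill::from_percent(75),
//             );
//     }
//
// and wire it into `frame_system::Config` via `type BlockWeights = RuntimeBlockWeights;`.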
@@ -144,19 +70,18 @@ impl CheckWeight where info: &DispatchInfoOf, len: usize, ) -> Result { + let length_limit = T::BlockLength::get(); let current_len = Module::::all_extrinsics_len(); - let maximum_len = T::MaximumBlockLength::get(); - let limit = Self::get_dispatch_limit_ratio(info.class) * maximum_len; let added_len = len as u32; let next_len = current_len.saturating_add(added_len); - if next_len > limit { + if next_len > *length_limit.max.get(info.class) { Err(InvalidTransaction::ExhaustsResources.into()) } else { Ok(next_len) } } - /// get the priority of an extrinsic denoted by `info`. + /// Get the priority of an extrinsic denoted by `info`. /// /// Operational transaction will be given a fixed initial amount to be fairly distinguished from /// the normal ones. @@ -213,7 +138,54 @@ impl CheckWeight where } } -impl SignedExtension for CheckWeight where +pub fn calculate_consumed_weight( + maximum_weight: BlockWeights, + mut all_weight: crate::ConsumedWeight, + info: &DispatchInfoOf, +) -> Result where + Call: Dispatchable, +{ + let extrinsic_weight = info.weight.saturating_add(maximum_weight.get(info.class).base_extrinsic); + let limit_per_class = maximum_weight.get(info.class); + + // add the weight. If class is unlimited, use saturating add instead of checked one. + if limit_per_class.max_total.is_none() && limit_per_class.reserved.is_none() { + all_weight.add(extrinsic_weight, info.class) + } else { + all_weight.checked_add(extrinsic_weight, info.class) + .map_err(|_| InvalidTransaction::ExhaustsResources)?; + } + + let per_class = *all_weight.get(info.class); + + // Check if we don't exceed per-class allowance + match limit_per_class.max_total { + Some(max) if per_class > max => { + return Err(InvalidTransaction::ExhaustsResources.into()); + }, + // There is no `max_total` limit (`None`), + // or we are below the limit. + _ => {}, + } + + // In cases total block weight is exceeded, we need to fall back + // to `reserved` pool if there is any. + if all_weight.total() > maximum_weight.max_block { + match limit_per_class.reserved { + // We are over the limit in reserved pool. + Some(reserved) if per_class > reserved => { + return Err(InvalidTransaction::ExhaustsResources.into()); + } + // There is either no limit in reserved pool (`None`), + // or we are below the limit. + _ => {}, + } + } + + Ok(all_weight) +} + +impl SignedExtension for CheckWeight where T::Call: Dispatchable { type AccountId = T::AccountId; @@ -277,7 +249,7 @@ impl SignedExtension for CheckWeight where // to them actually being useful. Block producers are thus not allowed to include mandatory // extrinsics that result in error. if let (DispatchClass::Mandatory, Err(e)) = (info.class, result) { - "Bad mandantory".print(); + "Bad mandatory".print(); e.print(); Err(InvalidTransaction::BadMandatory)? 
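// Summarising the accounting in `calculate_consumed_weight` above: the extrinsic weight
// plus the class's `base_extrinsic` is added to that class's running total (with a
// saturating add when the class has neither `max_total` nor `reserved`, i.e. is
// effectively unlimited); exceeding the class's `max_total` rejects the extrinsic with
// `ExhaustsResources`; and once the block total is past `max_block`, a class may only
// keep accumulating while it stays within its own `reserved` allowance. For example,
// assuming the runtime reserves weight for `DispatchClass::Operational`, an operational
// extrinsic can still be included in an otherwise full block as long as the total
// operational weight stays under that `reserved` value.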
@@ -294,7 +266,7 @@ impl SignedExtension for CheckWeight where } } -impl sp_std::fmt::Debug for CheckWeight { +impl sp_std::fmt::Debug for CheckWeight { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckWeight") @@ -315,12 +287,21 @@ mod tests { use frame_support::{assert_ok, assert_noop}; use frame_support::weights::{Weight, Pays}; + fn block_weights() -> crate::limits::BlockWeights { + ::BlockWeights::get() + } + fn normal_weight_limit() -> Weight { - ::AvailableBlockRatio::get() * ::MaximumBlockWeight::get() + block_weights().get(DispatchClass::Normal).max_total + .unwrap_or_else(|| block_weights().max_block) + } + + fn block_weight_limit() -> Weight { + block_weights().max_block } fn normal_length_limit() -> u32 { - ::AvailableBlockRatio::get() * ::MaximumBlockLength::get() + *::BlockLength::get().max.get(DispatchClass::Normal) } #[test] @@ -341,7 +322,7 @@ mod tests { check(|max, len| { assert_ok!(CheckWeight::::do_pre_dispatch(max, len)); assert_eq!(System::block_weight().total(), Weight::max_value()); - assert!(System::block_weight().total() > ::MaximumBlockWeight::get()); + assert!(System::block_weight().total() > block_weight_limit()); }); check(|max, len| { assert_ok!(CheckWeight::::do_validate(max, len)); @@ -352,7 +333,7 @@ mod tests { fn normal_extrinsic_limited_by_maximum_extrinsic_weight() { new_test_ext().execute_with(|| { let max = DispatchInfo { - weight: ::MaximumExtrinsicWeight::get() + 1, + weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + 1, class: DispatchClass::Normal, ..Default::default() }; @@ -368,13 +349,12 @@ mod tests { #[test] fn operational_extrinsic_limited_by_operational_space_limit() { new_test_ext().execute_with(|| { - let operational_limit = CheckWeight::::get_dispatch_limit_ratio( - DispatchClass::Operational - ) * ::MaximumBlockWeight::get(); - let base_weight = ::ExtrinsicBaseWeight::get(); - let block_base = ::BlockExecutionWeight::get(); + let weights = block_weights(); + let operational_limit = weights.get(DispatchClass::Operational).max_total + .unwrap_or_else(|| weights.max_block); + let base_weight = weights.get(DispatchClass::Normal).base_extrinsic; - let weight = operational_limit - base_weight - block_base; + let weight = operational_limit - base_weight; let okay = DispatchInfo { weight, class: DispatchClass::Operational, @@ -406,7 +386,7 @@ mod tests { new_test_ext().execute_with(|| { System::register_extra_weight_unchecked(Weight::max_value(), DispatchClass::Normal); assert_eq!(System::block_weight().total(), Weight::max_value()); - assert!(System::block_weight().total() > ::MaximumBlockWeight::get()); + assert!(System::block_weight().total() > block_weight_limit()); }); } @@ -426,8 +406,8 @@ mod tests { assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); assert_eq!(System::block_weight().total(), 768); assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); - assert_eq!(::MaximumBlockWeight::get(), 1024); - assert_eq!(System::block_weight().total(), ::MaximumBlockWeight::get()); + assert_eq!(block_weight_limit(), 1024); + assert_eq!(System::block_weight().total(), block_weight_limit()); // Checking single extrinsic should not take current block weight into account. 
assert_eq!(CheckWeight::::check_extrinsic_weight(&rest_operational), Ok(())); }); @@ -446,8 +426,8 @@ mod tests { // Extra 15 here from block execution + base extrinsic weight assert_eq!(System::block_weight().total(), 266); assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(::MaximumBlockWeight::get(), 1024); - assert_eq!(System::block_weight().total(), ::MaximumBlockWeight::get()); + assert_eq!(block_weight_limit(), 1024); + assert_eq!(System::block_weight().total(), block_weight_limit()); }); } @@ -486,7 +466,7 @@ mod tests { // given almost full block BlockWeight::mutate(|current_weight| { - current_weight.put(normal_limit, DispatchClass::Normal) + current_weight.set(normal_limit, DispatchClass::Normal) }); // will not fit. assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len).is_err()); @@ -552,19 +532,20 @@ mod tests { new_test_ext().execute_with(|| { let normal_limit = normal_weight_limit(); let small = DispatchInfo { weight: 100, ..Default::default() }; + let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic; let medium = DispatchInfo { - weight: normal_limit - ::ExtrinsicBaseWeight::get(), + weight: normal_limit - base_extrinsic, ..Default::default() }; let big = DispatchInfo { - weight: normal_limit - ::ExtrinsicBaseWeight::get() + 1, + weight: normal_limit - base_extrinsic + 1, ..Default::default() }; let len = 0_usize; let reset_check_weight = |i, f, s| { BlockWeight::mutate(|current_weight| { - current_weight.put(s, DispatchClass::Normal) + current_weight.set(s, DispatchClass::Normal) }); let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, i, len); if f { assert!(r.is_err()) } else { assert!(r.is_ok()) } @@ -586,10 +567,12 @@ mod tests { pays_fee: Default::default(), }; let len = 0_usize; + let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic; // We allow 75% for normal transaction, so we put 25% - extrinsic base weight BlockWeight::mutate(|current_weight| { - current_weight.put(256 - ::ExtrinsicBaseWeight::get(), DispatchClass::Normal) + current_weight.set(0, DispatchClass::Mandatory); + current_weight.set(256 - base_extrinsic, DispatchClass::Normal); }); let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); @@ -617,13 +600,14 @@ mod tests { let len = 0_usize; BlockWeight::mutate(|current_weight| { - current_weight.put(128, DispatchClass::Normal) + current_weight.set(0, DispatchClass::Mandatory); + current_weight.set(128, DispatchClass::Normal); }); let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); assert_eq!( BlockWeight::get().total(), - info.weight + 128 + ::ExtrinsicBaseWeight::get(), + info.weight + 128 + block_weights().get(DispatchClass::Normal).base_extrinsic, ); assert!( @@ -632,7 +616,7 @@ mod tests { ); assert_eq!( BlockWeight::get().total(), - info.weight + 128 + ::ExtrinsicBaseWeight::get(), + info.weight + 128 + block_weights().get(DispatchClass::Normal).base_extrinsic, ); }) } @@ -640,17 +624,81 @@ mod tests { #[test] fn zero_weight_extrinsic_still_has_base_weight() { new_test_ext().execute_with(|| { + let weights = block_weights(); let free = DispatchInfo { weight: 0, ..Default::default() }; let len = 0_usize; - // Initial weight from `BlockExecutionWeight` - assert_eq!(System::block_weight().total(), ::BlockExecutionWeight::get()); + // Initial weight from `weights.base_block` + assert_eq!( + System::block_weight().total(), + weights.base_block + ); let r = CheckWeight::(PhantomData).pre_dispatch(&1, 
CALL, &free, len); assert!(r.is_ok()); assert_eq!( System::block_weight().total(), - ::ExtrinsicBaseWeight::get() + ::BlockExecutionWeight::get() + weights.get(DispatchClass::Normal).base_extrinsic + weights.base_block ); }) } + + #[test] + fn normal_and_mandatory_tracked_separately() { + new_test_ext().execute_with(|| { + // Max block is 1024 + // Max normal is 768 (75%) + // Max mandatory is unlimited + let max_normal = DispatchInfo { weight: 753, ..Default::default() }; + let mandatory = DispatchInfo { weight: 1019, class: DispatchClass::Mandatory, ..Default::default() }; + + let len = 0_usize; + + assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + assert_eq!(System::block_weight().total(), 768); + assert_ok!(CheckWeight::::do_pre_dispatch(&mandatory, len)); + assert_eq!(block_weight_limit(), 1024); + assert_eq!(System::block_weight().total(), 1024 + 768); + assert_eq!(CheckWeight::::check_extrinsic_weight(&mandatory), Ok(())); + }); + } + + #[test] + fn no_max_total_should_still_be_limited_by_max_block() { + // given + let maximum_weight = BlockWeights::builder() + .base_block(0) + .for_class(DispatchClass::non_mandatory(), |w| { + w.base_extrinsic = 0; + w.max_total = Some(20); + }) + .for_class(DispatchClass::Mandatory, |w| { + w.base_extrinsic = 0; + w.reserved = Some(5); + w.max_total = None; + }) + .build_or_panic(); + let all_weight = crate::ConsumedWeight::new(|class| match class { + DispatchClass::Normal => 10, + DispatchClass::Operational => 10, + DispatchClass::Mandatory => 0, + }); + assert_eq!(maximum_weight.max_block, all_weight.total()); + + // fits into reserved + let mandatory1 = DispatchInfo { weight: 5, class: DispatchClass::Mandatory, ..Default::default() }; + // does not fit into reserved and the block is full. + let mandatory2 = DispatchInfo { weight: 6, class: DispatchClass::Mandatory, ..Default::default() }; + + // when + let result1 = calculate_consumed_weight::<::Call>( + maximum_weight.clone(), all_weight.clone(), &mandatory1 + ); + let result2 = calculate_consumed_weight::<::Call>( + maximum_weight, all_weight, &mandatory2 + ); + + // then + assert!(result2.is_err()); + assert!(result1.is_ok()); + } } diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 595b001ea6b013c3f63c1496070fb8913bb497c4..c5586f9856688f722cf69d5df899ef1dce48d396 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -20,7 +20,7 @@ //! The System module provides low-level access to core types and cross-cutting utilities. //! It acts as the base layer for other pallets to interact with the Substrate framework components. //! -//! - [`system::Trait`](./trait.Trait.html) +//! - [`system::Config`](./trait.Config.html) //! //! ## Overview //! @@ -74,10 +74,10 @@ //! use frame_support::{decl_module, dispatch}; //! use frame_system::{self as system, ensure_signed}; //! -//! pub trait Trait: system::Trait {} +//! pub trait Config: system::Config {} //! //! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 0] //! pub fn system_module_example(origin) -> dispatch::DispatchResult { //! 
let _sender = ensure_signed(origin)?; @@ -122,7 +122,7 @@ use frame_support::{ }, weights::{ Weight, RuntimeDbWeight, DispatchInfo, DispatchClass, - extract_actual_weight, + extract_actual_weight, PerDispatchClass, }, dispatch::DispatchResultWithPostInfo, }; @@ -132,15 +132,16 @@ use codec::{Encode, Decode, FullCodec, EncodeLike}; use sp_io::TestExternalities; pub mod offchain; +pub mod limits; #[cfg(test)] pub(crate) mod mock; mod extensions; -mod weight; pub mod weights; #[cfg(test)] mod tests; + pub use extensions::{ check_mortality::CheckMortality, check_genesis::CheckGenesis, check_nonce::CheckNonce, check_spec_version::CheckSpecVersion, check_tx_version::CheckTxVersion, @@ -160,11 +161,20 @@ pub fn extrinsics_data_root(xts: Vec>) -> H::Output { H::ordered_trie_root(xts) } -pub trait Trait: 'static + Eq + Clone { +/// An object to track the currently used extrinsic weight in a block. +pub type ConsumedWeight = PerDispatchClass; + +pub trait Config: 'static + Eq + Clone { /// The basic call filter to use in Origin. All origins are built with this filter as base, /// except Root. type BaseCallFilter: Filter; + /// Block & extrinsics weights: base values and limits. + type BlockWeights: Get; + + /// The maximum length of a block (in bytes). + type BlockLength: Get; + /// The `Origin` type used by dispatchable calls. type Origin: Into, Self::Origin>> @@ -219,31 +229,9 @@ pub trait Trait: 'static + Eq + Clone { /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount: Get; - /// The maximum weight of a block. - type MaximumBlockWeight: Get; - /// The weight of runtime database operations the runtime can invoke. type DbWeight: Get; - /// The base weight of executing a block, independent of the transactions in the block. - type BlockExecutionWeight: Get; - - /// The base weight of an Extrinsic in the block, independent of the of extrinsic being executed. - type ExtrinsicBaseWeight: Get; - - /// The maximal weight of a single Extrinsic. This should be set to at most - /// `MaximumBlockWeight - AverageOnInitializeWeight`. The limit only applies to extrinsics - /// containing `Normal` dispatch class calls. - type MaximumExtrinsicWeight: Get; - - /// The maximum length of a block (in bytes). - type MaximumBlockLength: Get; - - /// The portion of the block that is available to normal transaction. The rest can only be used - /// by operational transactions. This can be applied to any resource limit managed by the system - /// module, including weight and length. - type AvailableBlockRatio: Get; - /// Get the chain's current version. type Version: Get; @@ -270,8 +258,8 @@ pub trait Trait: 'static + Eq + Clone { type SystemWeightInfo: WeightInfo; } -pub type DigestOf = generic::Digest<::Hash>; -pub type DigestItemOf = generic::DigestItem<::Hash>; +pub type DigestOf = generic::Digest<::Hash>; +pub type DigestItemOf = generic::DigestItem<::Hash>; pub type Key = Vec; pub type KeyValue = (Vec, Vec); @@ -329,7 +317,7 @@ impl From> for RawOrigin { } /// Exposed trait-generic origin type. -pub type Origin = RawOrigin<::AccountId>; +pub type Origin = RawOrigin<::AccountId>; // Create a Hash with 69 for each byte, // only used to build genesis config. @@ -390,7 +378,7 @@ impl From for LastRuntimeUpgradeInfo { } decl_storage! { - trait Store for Module as System { + trait Store for Module as System { /// The full account information for a particular account ID. 
pub Account get(fn account): map hasher(blake2_128_concat) T::AccountId => AccountInfo; @@ -399,7 +387,7 @@ decl_storage! { ExtrinsicCount: Option; /// The current weight for the block. - BlockWeight get(fn block_weight): weight::ExtrinsicsWeight; + BlockWeight get(fn block_weight): ConsumedWeight; /// Total length (in bytes) for all extrinsics put together, for the current block. AllExtrinsicsLen: Option; @@ -478,7 +466,7 @@ decl_storage! { decl_event!( /// Event for the System module. - pub enum Event where AccountId = ::AccountId { + pub enum Event where AccountId = ::AccountId { /// An extrinsic completed successfully. \[info\] ExtrinsicSuccess(DispatchInfo), /// An extrinsic failed. \[error, info\] @@ -494,7 +482,7 @@ decl_event!( decl_error! { /// Error for the System module - pub enum Error for Module { + pub enum Error for Module { /// The name of specification does not match between the current runtime /// and the new runtime. InvalidSpecName, @@ -513,26 +501,17 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { type Error = Error; /// The maximum number of blocks to allow in mortal eras. const BlockHashCount: T::BlockNumber = T::BlockHashCount::get(); - /// The maximum weight of a block. - const MaximumBlockWeight: Weight = T::MaximumBlockWeight::get(); - /// The weight of runtime database operations the runtime can invoke. const DbWeight: RuntimeDbWeight = T::DbWeight::get(); - /// The base weight of executing a block, independent of the transactions in the block. - const BlockExecutionWeight: Weight = T::BlockExecutionWeight::get(); - - /// The base weight of an Extrinsic in the block, independent of the of extrinsic being executed. - const ExtrinsicBaseWeight: Weight = T::ExtrinsicBaseWeight::get(); - - /// The maximum length of a block (in bytes). - const MaximumBlockLength: u32 = T::MaximumBlockLength::get(); + /// The weight configuration (limits & base values) for each class of extrinsics and block. + const BlockWeights: limits::BlockWeights = T::BlockWeights::get(); fn on_runtime_upgrade() -> frame_support::weights::Weight { if !UpgradedToU32RefCount::get() { @@ -540,16 +519,22 @@ decl_module! { Some(AccountInfo { nonce, refcount: rc as RefCount, data }) ); UpgradedToU32RefCount::put(true); - T::MaximumBlockWeight::get() + T::BlockWeights::get().max_block } else { 0 } } + fn integrity_test() { + T::BlockWeights::get() + .validate() + .expect("The weights are invalid."); + } + /// A dispatch that will fill the block weight up to the given ratio. // TODO: This should only be available for testing, rather than in general usage, but // that's not possible at present (since it's within the decl_module macro). - #[weight = *_ratio * T::MaximumBlockWeight::get()] + #[weight = *_ratio * T::BlockWeights::get().max_block] fn fill_block(origin, _ratio: Perbill) { ensure_root(origin)?; } @@ -590,7 +575,7 @@ decl_module! { /// The weight of this function is dependent on the runtime, but generally this is very expensive. /// We will treat this as a full block. /// # - #[weight = (T::MaximumBlockWeight::get(), DispatchClass::Operational)] + #[weight = (T::BlockWeights::get().max_block, DispatchClass::Operational)] pub fn set_code(origin, code: Vec) { ensure_root(origin)?; Self::can_set_code(&code)?; @@ -607,7 +592,7 @@ decl_module! { /// - 1 event. /// The weight of this function is dependent on the runtime. We will treat this as a full block. 
/// # - #[weight = (T::MaximumBlockWeight::get(), DispatchClass::Operational)] + #[weight = (T::BlockWeights::get().max_block, DispatchClass::Operational)] pub fn set_code_without_checks(origin, code: Vec) { ensure_root(origin)?; storage::unhashed::put_raw(well_known_keys::CODE, &code); @@ -897,12 +882,16 @@ pub enum RefStatus { Unreferenced, } -impl Module { +impl Module { /// Deposits an event into this block's event record. pub fn deposit_event(event: impl Into) { Self::deposit_event_indexed(&[], event.into()); } + pub fn account_exists(who: &T::AccountId) -> bool { + Account::::contains_key(who) + } + /// Increment the reference counter on an account. pub fn inc_ref(who: &T::AccountId) { Account::::mutate(who, |a| a.refcount = a.refcount.saturating_add(1)); @@ -1024,15 +1013,27 @@ impl Module { } } - /// Remove temporary "environment" entries in storage. + /// Remove temporary "environment" entries in storage, compute the storage root and return the + /// resulting header for this block. pub fn finalize() -> T::Header { ExecutionPhase::kill(); ExtrinsicCount::kill(); AllExtrinsicsLen::kill(); - let number = >::take(); - let parent_hash = >::take(); - let mut digest = >::take(); + // The following fields + // + // - > + // - > + // - > + // - > + // - > + // - > + // + // stay to be inspected by the client and will be cleared by `Self::initialize`. + let number = >::get(); + let parent_hash = >::get(); + let mut digest = >::get(); + let extrinsics_root = >::take(); // move block hash pruning window by one block @@ -1060,14 +1061,6 @@ impl Module { digest.push(item); } - // The following fields - // - // - > - // - > - // - > - // - // stay to be inspected by the client and will be cleared by `Self::initialize`. - ::new(number, extrinsics_root, storage_root, parent_hash, digest) } @@ -1116,9 +1109,9 @@ impl Module { /// Set the current block weight. This should only be used in some integration tests. #[cfg(any(feature = "std", test))] - pub fn set_block_limits(weight: Weight, len: usize) { + pub fn set_block_consumed_resources(weight: Weight, len: usize) { BlockWeight::mutate(|current_weight| { - current_weight.put(weight, DispatchClass::Normal) + current_weight.set(weight, DispatchClass::Normal) }); AllExtrinsicsLen::put(len as u32); } @@ -1252,7 +1245,7 @@ impl Module { /// Event handler which calls on_created_account when it happens. pub struct CallOnCreatedAccount(PhantomData); -impl Happened for CallOnCreatedAccount { +impl Happened for CallOnCreatedAccount { fn happened(who: &T::AccountId) { Module::::on_created_account(who.clone()); } @@ -1260,15 +1253,15 @@ impl Happened for CallOnCreatedAccount { /// Event handler which calls kill_account when it happens. pub struct CallKillAccount(PhantomData); -impl Happened for CallKillAccount { +impl Happened for CallKillAccount { fn happened(who: &T::AccountId) { Module::::kill_account(who) } } -impl BlockNumberProvider for Module +impl BlockNumberProvider for Module { - type BlockNumber = ::BlockNumber; + type BlockNumber = ::BlockNumber; fn current_block_number() -> Self::BlockNumber { Module::::block_number() @@ -1278,7 +1271,7 @@ impl BlockNumberProvider for Module // Implement StoredMap for a simple single-item, kill-account-on-remove system. This works fine for // storing a single item which is required to not be empty/default for the account to exist. // Anything more complex will need more sophisticated logic. 
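The new `integrity_test` hook above means an inconsistent weight configuration now aborts at runtime-construction time instead of failing silently later. Below is a usage sketch of how a runtime might assemble its limits with the builder added in this PR; the two-second budget and the 75%/10% ratios are made-up example values, not a recommendation.

use frame_support::weights::{constants::WEIGHT_PER_SECOND, DispatchClass};
use frame_system::limits::BlockWeights;
use sp_runtime::Perbill;

/// Hypothetical block-weight configuration: a 2-second compute budget, 75% of
/// it available to `Normal` extrinsics, the remainder reserved for
/// `Operational` ones, and 10% of the block assumed to go to `on_initialize`.
pub fn example_block_weights() -> BlockWeights {
    let max_block = 2 * WEIGHT_PER_SECOND;
    let normal = Perbill::from_percent(75) * max_block;
    BlockWeights::builder()
        .for_class(DispatchClass::Normal, |weights| {
            weights.max_total = Some(normal);
        })
        .for_class(DispatchClass::Operational, |weights| {
            weights.max_total = Some(max_block);
            weights.reserved = Some(max_block - normal);
        })
        .avg_block_initialization(Perbill::from_percent(10))
        // Panics on an inconsistent configuration, mirroring what
        // `integrity_test` checks via `validate()`.
        .build_or_panic()
}

For the common case, `BlockWeights::with_sensible_defaults(2 * WEIGHT_PER_SECOND, Perbill::from_percent(75))` should produce an equivalent configuration in a single call.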
-impl StoredMap for Module { +impl StoredMap for Module { fn get(k: &T::AccountId) -> T::AccountData { Account::::get(k).data } @@ -1344,8 +1337,7 @@ pub fn split_inner(option: Option, splitter: impl FnOnce(T) -> (R, S } } - -impl IsDeadAccount for Module { +impl IsDeadAccount for Module { fn is_dead_account(who: &T::AccountId) -> bool { !Account::::contains_key(who) } @@ -1358,7 +1350,7 @@ impl Default for ChainContext { } } -impl Lookup for ChainContext { +impl Lookup for ChainContext { type Source = ::Source; type Target = ::Target; diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs new file mode 100644 index 0000000000000000000000000000000000000000..aac347b8e6580ed94d0395d44ab53bef1c696953 --- /dev/null +++ b/frame/system/src/limits.rs @@ -0,0 +1,434 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Block resource limits configuration structures. +//! +//! FRAME defines two resources that are limited within a block: +//! - Weight (execution cost/time) +//! - Length (block size) +//! +//! `frame_system` tracks consumption of each of these resources separately for each +//! `DispatchClass`. This module contains configuration object for both resources, +//! which should be passed to `frame_system` configuration when runtime is being set up. + +use frame_support::weights::{Weight, DispatchClass, constants, PerDispatchClass, OneOrMany}; +use sp_runtime::{RuntimeDebug, Perbill}; + +/// Block length limit configuration. +#[derive(RuntimeDebug, Clone)] +pub struct BlockLength { + /// Maximal total length in bytes for each extrinsic class. + /// + /// In the worst case, the total block length is going to be: + /// `MAX(max)` + pub max: PerDispatchClass, +} + +impl Default for BlockLength { + fn default() -> Self { + BlockLength::max_with_normal_ratio( + 5 * 1024 * 1024, + DEFAULT_NORMAL_RATIO, + ) + } +} + +impl BlockLength { + /// Create new `BlockLength` with `max` for every class. + pub fn max(max: u32) -> Self { + Self { + max: PerDispatchClass::new(|_| max), + } + } + + /// Create new `BlockLength` with `max` for `Operational` & `Mandatory` + /// and `normal * max` for `Normal`. + pub fn max_with_normal_ratio(max: u32, normal: Perbill) -> Self { + Self { + max: PerDispatchClass::new(|class| if class == DispatchClass::Normal { + normal * max + } else { + max + }), + } + } +} + +#[derive(Default, RuntimeDebug)] +pub struct ValidationErrors { + pub has_errors: bool, + #[cfg(feature = "std")] + pub errors: Vec, +} + +macro_rules! error_assert { + ($cond : expr, $err : expr, $format : expr $(, $params: expr )*$(,)*) => { + if !$cond { + $err.has_errors = true; + #[cfg(feature = "std")] + { $err.errors.push(format!($format $(, &$params )*)); } + } + } +} + +/// A result of validating `BlockWeights` correctness. 
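A quick sketch of the `max_with_normal_ratio` constructor defined above, using the same 5 MiB / 75% figures as the `Default` impl (this assumes the `frame_system` and `sp_runtime` APIs as modified by this PR):

use frame_support::weights::DispatchClass;
use frame_system::limits::BlockLength;
use sp_runtime::Perbill;

/// `Operational` and `Mandatory` extrinsics may fill the whole 5 MiB block,
/// while `Normal` extrinsics are capped at 75% of it.
pub fn example_block_length() -> BlockLength {
    let len = BlockLength::max_with_normal_ratio(5 * 1024 * 1024, Perbill::from_percent(75));
    assert_eq!(*len.max.get(DispatchClass::Normal), 3_932_160); // 75% of 5 MiB
    assert_eq!(*len.max.get(DispatchClass::Operational), 5 * 1024 * 1024);
    len
}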
+pub type ValidationResult = Result; + +/// A ratio of `Normal` dispatch class within block, used as default value for +/// `BlockWeight` and `BlockLength`. The `Default` impls are provided mostly for convenience +/// to use in tests. +const DEFAULT_NORMAL_RATIO: Perbill = Perbill::from_percent(75); + +/// `DispatchClass`-specific weight configuration. +#[derive(RuntimeDebug, Clone, codec::Encode, codec::Decode)] +pub struct WeightsPerClass { + /// Base weight of single extrinsic of given class. + pub base_extrinsic: Weight, + /// Maximal weight of single extrinsic. Should NOT include `base_extrinsic` cost. + /// + /// `None` indicates that this class of extrinsics doesn't have a limit. + pub max_extrinsic: Option, + /// Block maximal total weight for all extrinsics of given class. + /// + /// `None` indicates that weight sum of this class of extrinsics is not + /// restricted. Use this value carefully, since it might produce heavily oversized + /// blocks. + /// + /// In the worst case, the total weight consumed by the class is going to be: + /// `MAX(max_total) + MAX(reserved)`. + pub max_total: Option, + /// Block reserved allowance for all extrinsics of a particular class. + /// + /// Setting to `None` indicates that extrinsics of that class are allowed + /// to go over total block weight (but at most `max_total` for that class). + /// Setting to `Some(x)` guarantees that at least `x` weight of particular class + /// is processed in every block. + pub reserved: Option, +} + +/// Block weight limits & base values configuration. +/// +/// This object is responsible for defining weight limits and base weight values tracked +/// during extrinsic execution. +/// +/// Each block starts with `base_block` weight being consumed right away. Next up the +/// `on_initialize` pallet callbacks are invoked and their cost is added before any extrinsic +/// is executed. This cost is tracked as `Mandatory` dispatch class. +/// +/// | | `max_block` | | +/// | | | | +/// | | | | +/// | | | | +/// | | | #| `on_initialize` +/// | #| `base_block` | #| +/// |NOM| |NOM| +/// ||\_ Mandatory +/// |\__ Operational +/// \___ Normal +/// +/// The remaining capacity can be used to dispatch extrinsics. Note that each dispatch class +/// is being tracked separately, but the sum can't exceed `max_block` (except for `reserved`). +/// Below you can see a picture representing full block with 3 extrinsics (two `Operational` and +/// one `Normal`). Each class has it's own limit `max_total`, but also the sum cannot exceed +/// `max_block` value. +/// -- `Mandatory` limit (unlimited) +/// | # | | | +/// | # | `Ext3` | - - `Operational` limit +/// |# | `Ext2` |- - `Normal` limit +/// | # | `Ext1` | # | +/// | #| `on_initialize` | ##| +/// | #| `base_block` |###| +/// |NOM| |NOM| +/// +/// It should be obvious now that it's possible for one class to reach it's limit (say `Normal`), +/// while the block has still capacity to process more transactions (`max_block` not reached, +/// `Operational` transactions can still go in). Setting `max_total` to `None` disables the +/// per-class limit. This is generally highly recommended for `Mandatory` dispatch class, while it +/// can be dangerous for `Normal` class and should only be done with extra care and consideration. +/// +/// Often it's desirable for some class of transactions to be added to the block despite it being +/// full. For instance one might want to prevent high-priority `Normal` transactions from pushing +/// out lower-priority `Operational` transactions. 
In such cases you might add a `reserved` capacity +/// for given class. +/// _ +/// # \ +/// # `Ext8` - `reserved` +/// # _/ +/// | # | `Ext7 | - - `Operational` limit +/// |# | `Ext6` | | +/// |# | `Ext5` |-# - `Normal` limit +/// |# | `Ext4` |## | +/// | #| `on_initialize` |###| +/// | #| `base_block` |###| +/// |NOM| |NOM| +/// +/// In the above example, `Ext4-6` fill up the block almost up to `max_block`. `Ext7` would not fit +/// if there wasn't the extra `reserved` space for `Operational` transactions. Note that `max_total` +/// limit applies to `reserved` space as well (i.e. the sum of weights of `Ext7` & `Ext8` mustn't +/// exceed it). Setting `reserved` to `None` allows the extrinsics to always get into the block up +/// to their `max_total` limit. If `max_total` is set to `None` as well, all extrinsics witch +/// dispatchables of given class will always end up in the block (recommended for `Mandatory` +/// dispatch class). +/// +/// As a consequence of `reserved` space, total consumed block weight might exceed `max_block` +/// value, so this parameter should rather be thought of as "target block weight" than a hard limit. +#[derive(RuntimeDebug, Clone, codec::Encode, codec::Decode)] +pub struct BlockWeights { + /// Base weight of block execution. + pub base_block: Weight, + /// Maximal total weight consumed by all kinds of extrinsics (without `reserved` space). + pub max_block: Weight, + /// Weight limits for extrinsics of given dispatch class. + pub per_class: PerDispatchClass, +} + +impl Default for BlockWeights { + fn default() -> Self { + Self::with_sensible_defaults( + 1 * constants::WEIGHT_PER_SECOND, + DEFAULT_NORMAL_RATIO, + ) + } +} + +impl BlockWeights { + /// Get per-class weight settings. + pub fn get(&self, class: DispatchClass) -> &WeightsPerClass { + self.per_class.get(class) + } + + /// Verifies correctness of this `BlockWeights` object. + pub fn validate(self) -> ValidationResult { + fn or_max(w: Option) -> Weight { + w.unwrap_or_else(|| Weight::max_value()) + } + let mut error = ValidationErrors::default(); + + for class in DispatchClass::all() { + let weights = self.per_class.get(*class); + let max_for_class = or_max(weights.max_total); + let base_for_class = weights.base_extrinsic; + let reserved = or_max(weights.reserved); + // Make sure that if total is set it's greater than base_block && + // base_for_class + error_assert!( + (max_for_class > self.base_block && max_for_class > base_for_class) + || max_for_class == 0, + &mut error, + "[{:?}] {:?} (total) has to be greater than {:?} (base block) & {:?} (base extrinsic)", + class, max_for_class, self.base_block, base_for_class, + ); + // Max extrinsic can't be greater than max_for_class. + error_assert!( + weights.max_extrinsic.unwrap_or(0) <= max_for_class.saturating_sub(base_for_class), + &mut error, + "[{:?}] {:?} (max_extrinsic) can't be greater than {:?} (max for class)", + class, weights.max_extrinsic, + max_for_class.saturating_sub(base_for_class), + ); + // Max extrinsic should not be 0 + error_assert!( + weights.max_extrinsic.unwrap_or_else(|| Weight::max_value()) > 0, + &mut error, + "[{:?}] {:?} (max_extrinsic) must not be 0. Check base cost and average initialization cost.", + class, weights.max_extrinsic, + ); + // Make sure that if reserved is set it's greater than base_for_class. 
+ error_assert!( + reserved > base_for_class || reserved == 0, + &mut error, + "[{:?}] {:?} (reserved) has to be greater than {:?} (base extrinsic) if set", + class, reserved, base_for_class, + ); + // Make sure max block is greater than max_total if it's set. + error_assert!( + self.max_block >= weights.max_total.unwrap_or(0), + &mut error, + "[{:?}] {:?} (max block) has to be greater than {:?} (max for class)", + class, self.max_block, weights.max_total, + ); + // Make sure we can fit at least one extrinsic. + error_assert!( + self.max_block > base_for_class + self.base_block, + &mut error, + "[{:?}] {:?} (max block) must fit at least one extrinsic {:?} (base weight)", + class, self.max_block, base_for_class + self.base_block, + ); + } + + if error.has_errors { + Err(error) + } else { + Ok(self) + } + } + + /// Create new weights definition, with both `Normal` and `Operational` + /// classes limited to given weight. + /// + /// Note there is no reservation for `Operational` class, so this constructor + /// is not suitable for production deployments. + pub fn simple_max(block_weight: Weight) -> Self { + Self::builder() + .base_block(0) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = 0; + }) + .for_class(DispatchClass::non_mandatory(), |weights| { + weights.max_total = block_weight.into(); + }) + .build() + .expect("We only specify max_total and leave base values as defaults; qed") + } + + /// Create a sensible default weights system given only expected maximal block weight and the + /// ratio that `Normal` extrinsics should occupy. + /// + /// Assumptions: + /// - Average block initialization is assumed to be `10%`. + /// - `Operational` transactions have reserved allowance (`1.0 - normal_ratio`) + pub fn with_sensible_defaults( + expected_block_weight: Weight, + normal_ratio: Perbill, + ) -> Self { + let normal_weight = normal_ratio * expected_block_weight; + Self::builder() + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = normal_weight.into(); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = expected_block_weight.into(); + weights.reserved = (expected_block_weight - normal_weight).into(); + }) + .avg_block_initialization(Perbill::from_percent(10)) + .build() + .expect("Sensible defaults are tested to be valid; qed") + } + + /// Start constructing new `BlockWeights` object. + /// + /// By default all kinds except of `Mandatory` extrinsics are disallowed. + pub fn builder() -> BlockWeightsBuilder { + BlockWeightsBuilder { + weights: BlockWeights { + base_block: constants::BlockExecutionWeight::get(), + max_block: 0, + per_class: PerDispatchClass::new(|class| { + let initial = if class == DispatchClass::Mandatory { None } else { Some(0) }; + WeightsPerClass { + base_extrinsic: constants::ExtrinsicBaseWeight::get(), + max_extrinsic: None, + max_total: initial, + reserved: initial, + } + }), + }, + init_cost: None, + } + } +} + +/// An opinionated builder for `Weights` object. +pub struct BlockWeightsBuilder { + weights: BlockWeights, + init_cost: Option, +} + +impl BlockWeightsBuilder { + /// Set base block weight. + pub fn base_block(mut self, base_block: Weight) -> Self { + self.weights.base_block = base_block; + self + } + + /// Average block initialization weight cost. + /// + /// This value is used to derive maximal allowed extrinsic weight for each + /// class, based on the allowance. 
+ /// + /// This is to make sure that extrinsics don't stay forever in the pool, + /// because they could seemingly fit the block (since they are below `max_block`), + /// but the cost of calling `on_initialize` always prevents them from being included. + pub fn avg_block_initialization(mut self, init_cost: Perbill) -> Self { + self.init_cost = Some(init_cost); + self + } + + /// Set parameters for particular class. + /// + /// Note: `None` values of `max_extrinsic` will be overwritten in `build` in case + /// `avg_block_initialization` rate is set to a non-zero value. + pub fn for_class( + mut self, + class: impl OneOrMany, + action: impl Fn(&mut WeightsPerClass), + ) -> Self { + for class in class.into_iter() { + action(self.weights.per_class.get_mut(class)); + } + self + } + + /// Construct the `BlockWeights` object. + pub fn build(self) -> ValidationResult { + // compute max extrinsic size + let Self { mut weights, init_cost } = self; + + // compute max block size. + for class in DispatchClass::all() { + weights.max_block = match weights.per_class.get(*class).max_total { + Some(max) if max > weights.max_block => max, + _ => weights.max_block, + }; + } + // compute max size of single extrinsic + if let Some(init_weight) = init_cost.map(|rate| rate * weights.max_block) { + for class in DispatchClass::all() { + let per_class = weights.per_class.get_mut(*class); + if per_class.max_extrinsic.is_none() && init_cost.is_some() { + per_class.max_extrinsic = per_class.max_total + .map(|x| x.saturating_sub(init_weight)) + .map(|x| x.saturating_sub(per_class.base_extrinsic)); + } + } + } + + // Validate the result + weights.validate() + } + + /// Construct the `BlockWeights` object or panic if it's invalid. + /// + /// This is a convenience method to be called whenever you construct a runtime. + pub fn build_or_panic(self) -> BlockWeights { + self.build().expect( + "Builder finished with `build_or_panic`; The panic is expected if runtime weights are not correct" + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn default_weights_are_valid() { + BlockWeights::default() + .validate() + .unwrap(); + } +} diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index cd67a74114073d38191fe9dd343c064e7b667d78..1558a5ed3970885ffb767ef0dfc770423ebca5bd 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -34,12 +34,11 @@ impl_outer_origin! { #[derive(Clone, Eq, PartialEq, Debug, Default)] pub struct Test; +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +const MAX_BLOCK_WEIGHT: Weight = 1024; + parameter_types! { pub const BlockHashCount: u64 = 10; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumExtrinsicWeight: Weight = 768; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - pub const MaximumBlockLength: u32 = 1024; pub Version: RuntimeVersion = RuntimeVersion { spec_name: sp_version::create_runtime_str!("test"), impl_name: sp_version::create_runtime_str!("system-test"), @@ -49,12 +48,28 @@ parameter_types!
{ apis: sp_version::create_apis_vec!([]), transaction_version: 1, }; - pub const BlockExecutionWeight: Weight = 10; - pub const ExtrinsicBaseWeight: Weight = 5; pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 10, write: 100, }; + pub RuntimeBlockWeights: limits::BlockWeights = limits::BlockWeights::builder() + .base_block(10) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = 5; + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAX_BLOCK_WEIGHT); + weights.reserved = Some( + MAX_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(Perbill::from_percent(0)) + .build_or_panic(); + pub RuntimeBlockLength: limits::BlockLength = + limits::BlockLength::max_with_normal_ratio(1024, NORMAL_DISPATCH_RATIO); } thread_local!{ @@ -71,7 +86,7 @@ pub struct Call; impl Dispatchable for Call { type Origin = Origin; - type Trait = (); + type Config = (); type Info = DispatchInfo; type PostInfo = PostDispatchInfo; fn dispatch(self, _origin: Self::Origin) @@ -80,8 +95,10 @@ impl Dispatchable for Call { } } -impl Trait for Test { +impl Config for Test { type BaseCallFilter = (); + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; type Origin = Origin; type Call = Call; type Index = u64; @@ -93,13 +110,7 @@ impl Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = DbWeight; - type BlockExecutionWeight = BlockExecutionWeight; - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumExtrinsicWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = Version; type PalletInfo = (); type AccountData = u32; @@ -109,16 +120,16 @@ impl Trait for Test { } pub type System = Module; -pub type SysEvent = ::Event; +pub type SysEvent = ::Event; -pub const CALL: &::Call = &Call; +pub const CALL: &::Call = &Call; /// Create new externalities for `System` module tests. pub fn new_test_ext() -> sp_io::TestExternalities { let mut ext: sp_io::TestExternalities = GenesisConfig::default().build_storage::().unwrap().into(); // Add to each test the initial weight of a block ext.execute_with(|| System::register_extra_weight_unchecked( - ::BlockExecutionWeight::get(), + ::BlockWeights::get().base_block, DispatchClass::Mandatory )); ext diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs index 25d18ac6bf255eebe7625f6ecc7c6b060b676505..f5186234b6021eba9d9222209cb4ab5d49c03a59 100644 --- a/frame/system/src/offchain.rs +++ b/frame/system/src/offchain.rs @@ -447,7 +447,7 @@ pub trait AppCrypto { /// // TODO [#5663] Could this be just `T::Signature as traits::Verify>::Signer`? // Seems that this may cause issues with bounds resolution. -pub trait SigningTypes: crate::Trait { +pub trait SigningTypes: crate::Config { /// A public key that is capable of identifing `AccountId`s. /// /// Usually that's either a raw crypto public key (e.g. `sr25519::Public`) or diff --git a/frame/system/src/weight.rs b/frame/system/src/weight.rs deleted file mode 100644 index 93295093c4fb88aaff70c259d889a6ce1e352a04..0000000000000000000000000000000000000000 --- a/frame/system/src/weight.rs +++ /dev/null @@ -1,76 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use codec::{Encode, Decode}; -use frame_support::weights::{Weight, DispatchClass}; -use sp_runtime::RuntimeDebug; - -/// An object to track the currently used extrinsic weight in a block. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] -pub struct ExtrinsicsWeight { - normal: Weight, - operational: Weight, -} - -impl ExtrinsicsWeight { - /// Returns the total weight consumed by all extrinsics in the block. - pub fn total(&self) -> Weight { - self.normal.saturating_add(self.operational) - } - - /// Add some weight of a specific dispatch class, saturating at the numeric bounds of `Weight`. - pub fn add(&mut self, weight: Weight, class: DispatchClass) { - let value = self.get_mut(class); - *value = value.saturating_add(weight); - } - - /// Try to add some weight of a specific dispatch class, returning Err(()) if overflow would - /// occur. - pub fn checked_add(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { - let value = self.get_mut(class); - *value = value.checked_add(weight).ok_or(())?; - Ok(()) - } - - /// Subtract some weight of a specific dispatch class, saturating at the numeric bounds of - /// `Weight`. - pub fn sub(&mut self, weight: Weight, class: DispatchClass) { - let value = self.get_mut(class); - *value = value.saturating_sub(weight); - } - - /// Get the current weight of a specific dispatch class. - pub fn get(&self, class: DispatchClass) -> Weight { - match class { - DispatchClass::Operational => self.operational, - DispatchClass::Normal | DispatchClass::Mandatory => self.normal, - } - } - - /// Get a mutable reference to the current weight of a specific dispatch class. - fn get_mut(&mut self, class: DispatchClass) -> &mut Weight { - match class { - DispatchClass::Operational => &mut self.operational, - DispatchClass::Normal | DispatchClass::Mandatory => &mut self.normal, - } - } - - /// Set the weight of a specific dispatch class. - pub fn put(&mut self, new: Weight, class: DispatchClass) { - *self.get_mut(class) = new; - } -} diff --git a/frame/system/src/weights.rs b/frame/system/src/weights.rs index 5f3c84deb41c70b7720929a171b4bf20d9843002..99ea4a033ca9fd40ed65d40f9b7a99bf6b1f849f 100644 --- a/frame/system/src/weights.rs +++ b/frame/system/src/weights.rs @@ -54,7 +54,7 @@ pub trait WeightInfo { /// Weights for frame_system using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn remark(_b: u32, ) -> Weight { (1_973_000 as Weight) } diff --git a/frame/timestamp/README.md b/frame/timestamp/README.md index 54ef7fa43b4f0cb39bc0bc396140e87c34d441ba..de1fb74392225707a2759cc3b5d6c9ff41570f74 100644 --- a/frame/timestamp/README.md +++ b/frame/timestamp/README.md @@ -29,7 +29,7 @@ because of cumulative calculation errors and hence should be avoided. 
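The `ExtrinsicsWeight` struct deleted in the hunk above kept only two buckets and folded `Mandatory` into `Normal`; its replacement, `ConsumedWeight` (a `PerDispatchClass` of `Weight`), keeps one bucket per dispatch class. A tiny standalone model of that difference in plain Rust (toy types and numbers, not the real `frame_support` ones):

// Toy per-class tracker: unlike the deleted `ExtrinsicsWeight`, every class
// accumulates into its own slot, so heavy mandatory inherents no longer eat
// into the `Normal` allowance.
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum DispatchClass { Normal, Operational, Mandatory }

#[derive(Default)]
struct ConsumedWeight {
    normal: u64,
    operational: u64,
    mandatory: u64,
}

impl ConsumedWeight {
    fn add(&mut self, weight: u64, class: DispatchClass) {
        let slot = match class {
            DispatchClass::Normal => &mut self.normal,
            DispatchClass::Operational => &mut self.operational,
            DispatchClass::Mandatory => &mut self.mandatory,
        };
        *slot = slot.saturating_add(weight);
    }
    fn total(&self) -> u64 {
        self.normal + self.operational + self.mandatory
    }
}

fn main() {
    let mut used = ConsumedWeight::default();
    used.add(768, DispatchClass::Normal);     // Normal class at its 768 cap
    used.add(1019, DispatchClass::Mandatory); // Mandatory has no cap
    // Loosely mirrors the new `normal_and_mandatory_tracked_separately` test
    // (base weights omitted here): the running total may legitimately exceed
    // the 1024 `max_block`, because mandatory extrinsics must always go in.
    assert_eq!(used.total(), 768 + 1019);
}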
* `get` - Gets the current time for the current block. If this function is called prior to setting the timestamp, it will return the timestamp of the previous block. -### Trait Getters +### Config Getters * `MinimumPeriod` - Gets the minimum (and advised) period between blocks for the chain. @@ -48,10 +48,10 @@ trait from the timestamp trait. use frame_support::{decl_module, dispatch}; use frame_system::ensure_signed; -pub trait Trait: timestamp::Trait {} +pub trait Config: timestamp::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn get_time(origin) -> dispatch::DispatchResult { let _sender = ensure_signed(origin)?; @@ -71,4 +71,4 @@ the Timestamp module for session management. * [Session](https://docs.rs/pallet-session/latest/pallet_session/) -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index d546a34017d0a7b7510e0db10923e119ac255749..b62777832ab78e030c3e37cbbb14d150dd6df9a8 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -19,7 +19,7 @@ //! //! The Timestamp module provides functionality to get and set the on-chain time. //! -//! - [`timestamp::Trait`](./trait.Trait.html) +//! - [`timestamp::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! - [`Module`](./struct.Module.html) //! @@ -46,7 +46,7 @@ //! * `get` - Gets the current time for the current block. If this function is called prior to //! setting the timestamp, it will return the timestamp of the previous block. //! -//! ### Trait Getters +//! ### Config Getters //! //! * `MinimumPeriod` - Gets the minimum (and advised) period between blocks for the chain. //! @@ -66,10 +66,10 @@ //! # use pallet_timestamp as timestamp; //! use frame_system::ensure_signed; //! -//! pub trait Trait: timestamp::Trait {} +//! pub trait Config: timestamp::Config {} //! //! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 0] //! pub fn get_time(origin) -> dispatch::DispatchResult { //! let _sender = ensure_signed(origin)?; @@ -118,7 +118,7 @@ use sp_timestamp::{ pub use weights::WeightInfo; /// The module configuration trait -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// Type used for expressing timestamp. type Moment: Parameter + Default + AtLeast32Bit + Scale + Copy; @@ -137,7 +137,7 @@ pub trait Trait: frame_system::Trait { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// The minimum period between blocks. Beware that this is different to the *expected* period /// that the block production apparatus provides. Your chosen consensus system will generally /// work with this to determine a sensible block time. e.g. For Aura, it will be double this @@ -194,7 +194,7 @@ decl_module! { } decl_storage! { - trait Store for Module as Timestamp { + trait Store for Module as Timestamp { /// Current time for the current block. pub Now get(fn now): T::Moment; @@ -203,7 +203,7 @@ decl_storage! { } } -impl Module { +impl Module { /// Get the current time for the current block. 
/// /// NOTE: if this function is called prior to setting the timestamp, @@ -225,7 +225,7 @@ fn extract_inherent_data(data: &InherentData) -> Result ProvideInherent for Module { +impl ProvideInherent for Module { type Call = Call; type Error = InherentError; const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; @@ -260,7 +260,7 @@ impl ProvideInherent for Module { } } -impl Time for Module { +impl Time for Module { type Moment = T::Moment; /// Before the first set of now with inherent the value returned is zero. @@ -272,7 +272,7 @@ impl Time for Module { /// Before the timestamp inherent is applied, it returns the time of previous block. /// /// On genesis the time returned is not valid. -impl UnixTime for Module { +impl UnixTime for Module { fn now() -> core::time::Duration { // now is duration since unix epoch in millisecond as documented in // `sp_timestamp::InherentDataProvider`. @@ -292,10 +292,10 @@ impl UnixTime for Module { mod tests { use super::*; - use frame_support::{impl_outer_origin, assert_ok, parameter_types, weights::Weight}; + use frame_support::{impl_outer_origin, assert_ok, parameter_types}; use sp_io::TestExternalities; use sp_core::H256; - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; + use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; pub fn new_test_ext() -> TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); @@ -310,12 +310,14 @@ mod tests { pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -327,13 +329,6 @@ mod tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); type PalletInfo = (); type AccountData = (); @@ -344,7 +339,7 @@ mod tests { parameter_types! { pub const MinimumPeriod: u64 = 5; } - impl Trait for Test { + impl Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; diff --git a/frame/timestamp/src/weights.rs b/frame/timestamp/src/weights.rs index 67ce28ba9111f37452f0ccf572ba6d2783625add..d3f2dcc7ba6fa0fec543e3402372b8058a70aa5f 100644 --- a/frame/timestamp/src/weights.rs +++ b/frame/timestamp/src/weights.rs @@ -50,7 +50,7 @@ pub trait WeightInfo { /// Weights for pallet_timestamp using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn set() -> Weight { (11_650_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) diff --git a/frame/tips/Cargo.toml b/frame/tips/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..386d49372c76915cc3ac9d6eee812d869d605ecb --- /dev/null +++ b/frame/tips/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "pallet-tips" +version = "2.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet to manage tips" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +serde = { version = "1.0.101", optional = true, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +pallet-treasury = { version = "2.0.0", default-features = false, path = "../treasury" } + +frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } + +[dev-dependencies] +sp-io ={ version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-storage = { version = "2.0.0", path = "../../primitives/storage" } +pallet-balances = { version = "2.0.0", path = "../balances" } + +[features] +default = ["std"] +std = [ + "serde", + "codec/std", + "sp-std/std", + "sp-runtime/std", + "frame-support/std", + "frame-system/std", + "pallet-treasury/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", +] diff --git a/frame/tips/README.md b/frame/tips/README.md new file mode 100644 index 0000000000000000000000000000000000000000..457e5b3bd0e79d106194854427ca1f0ce5d2764b --- /dev/null +++ b/frame/tips/README.md @@ -0,0 +1,32 @@ +# Tipping Module ( pallet-tips ) + +**Note :: This pallet is tightly coupled to pallet-treasury** + +A subsystem to allow for an agile "tipping" process, whereby a reward may be given without first +having a pre-determined stakeholder group come to consensus on how much should be paid. + +A group of `Tippers` is determined through the config `Trait`. After half of these have declared +some amount that they believe a particular reported reason deserves, then a countdown period is +entered where any remaining members can declare their tip amounts also. After the close of the +countdown period, the median of all declared tips is paid to the reported beneficiary, along with +any finders fee, in case of a public (and bonded) original report. + +### Terminology + +- **Tipping:** The process of gathering declarations of amounts to tip and taking the median amount + to be transferred from the treasury to a beneficiary account. +- **Tip Reason:** The reason for a tip; generally a URL which embodies or explains why a particular + individual (identified by an account ID) is worthy of a recognition by the treasury. 
+- **Finder:** The original public reporter of some reason for tipping. +- **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip, + rather than the main beneficiary. + +## Interface + +### Dispatchable Functions + +- `report_awesome` - Report something worthy of a tip and register for a finders fee. +- `retract_tip` - Retract a previous (finders fee registered) report. +- `tip_new` - Report an item worthy of a tip and declare a specific amount to tip. +- `tip` - Declare or redeclare an amount to tip for a particular reason. +- `close_tip` - Close and pay out a tip. diff --git a/frame/tips/src/benchmarking.rs b/frame/tips/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..71f9002b9bf119db86598b73bc776438f056748a --- /dev/null +++ b/frame/tips/src/benchmarking.rs @@ -0,0 +1,193 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Treasury tips benchmarking. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; + +use frame_system::RawOrigin; +use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use sp_runtime::{traits::{Saturating}}; + +use crate::Module as TipsMod; + +const SEED: u32 = 0; + +// Create the pre-requisite information needed to create a `report_awesome`. +fn setup_awesome(length: u32) -> (T::AccountId, Vec, T::AccountId) { + let caller = whitelisted_caller(); + let value = T::TipReportDepositBase::get() + + T::DataDepositPerByte::get() * length.into() + + T::Currency::minimum_balance(); + let _ = T::Currency::make_free_balance_be(&caller, value); + let reason = vec![0; length as usize]; + let awesome_person = account("awesome", 0, SEED); + (caller, reason, awesome_person) +} + +// Create the pre-requisite information needed to call `tip_new`. +fn setup_tip(r: u32, t: u32) -> + Result<(T::AccountId, Vec, T::AccountId, BalanceOf), &'static str> +{ + let tippers_count = T::Tippers::count(); + + for i in 0 .. t { + let member = account("member", i, SEED); + T::Tippers::add(&member); + ensure!(T::Tippers::contains(&member), "failed to add tipper"); + } + + ensure!(T::Tippers::count() == tippers_count + t as usize, "problem creating tippers"); + let caller = account("member", t - 1, SEED); + let reason = vec![0; r as usize]; + let beneficiary = account("beneficiary", t, SEED); + let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); + Ok((caller, reason, beneficiary, value)) +} + +// Create `t` new tips for the tip proposal with `hash`. +// This function automatically makes the tip able to close. +fn create_tips(t: u32, hash: T::Hash, value: BalanceOf) -> + Result<(), &'static str> +{ + for i in 0 .. 
t { + let caller = account("member", i, SEED); + ensure!(T::Tippers::contains(&caller), "caller is not a tipper"); + TipsMod::::tip(RawOrigin::Signed(caller).into(), hash, value)?; + } + Tips::::mutate(hash, |maybe_tip| { + if let Some(open_tip) = maybe_tip { + open_tip.closes = Some(T::BlockNumber::zero()); + } + }); + Ok(()) +} + +fn setup_pod_account() { + let pot_account = TipsMod::::account_id(); + let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000u32.into()); + let _ = T::Currency::make_free_balance_be(&pot_account, value); +} + +const MAX_BYTES: u32 = 16384; +const MAX_TIPPERS: u32 = 100; + +benchmarks! { + _ { } + + report_awesome { + let r in 0 .. MAX_BYTES; + let (caller, reason, awesome_person) = setup_awesome::(r); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), reason, awesome_person) + + retract_tip { + let r = MAX_BYTES; + let (caller, reason, awesome_person) = setup_awesome::(r); + TipsMod::::report_awesome( + RawOrigin::Signed(caller.clone()).into(), + reason.clone(), + awesome_person.clone() + )?; + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &awesome_person)); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), hash) + + tip_new { + let r in 0 .. MAX_BYTES; + let t in 1 .. MAX_TIPPERS; + + let (caller, reason, beneficiary, value) = setup_tip::(r, t)?; + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), reason, beneficiary, value) + + tip { + let t in 1 .. MAX_TIPPERS; + let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); + TipsMod::::tip_new( + RawOrigin::Signed(member).into(), + reason.clone(), + beneficiary.clone(), + value + )?; + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); + ensure!(Tips::::contains_key(hash), "tip does not exist"); + create_tips::(t - 1, hash.clone(), value)?; + let caller = account("member", t - 1, SEED); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), hash, value) + + close_tip { + let t in 1 .. MAX_TIPPERS; + + // Make sure pot is funded + setup_pod_account::(); + + // Set up a new tip proposal + let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); + TipsMod::::tip_new( + RawOrigin::Signed(member).into(), + reason.clone(), + beneficiary.clone(), + value + )?; + + // Create a bunch of tips + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); + ensure!(Tips::::contains_key(hash), "tip does not exist"); + create_tips::(t, hash.clone(), value)?; + + let caller = account("caller", t, SEED); + // Whitelist caller account from further DB operations. 
+ let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), hash) + +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_report_awesome::()); + assert_ok!(test_benchmark_retract_tip::()); + assert_ok!(test_benchmark_tip_new::()); + assert_ok!(test_benchmark_tip::()); + assert_ok!(test_benchmark_close_tip::()); + }); + } +} diff --git a/frame/tips/src/lib.rs b/frame/tips/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..3507b220d5dbf6d86f1078e0ce5a0c21736f7157 --- /dev/null +++ b/frame/tips/src/lib.rs @@ -0,0 +1,576 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Tipping Module ( pallet-tips ) +//! +//! > NOTE: This pallet is tightly coupled with pallet-treasury. +//! +//! A subsystem to allow for an agile "tipping" process, whereby a reward may be given without first +//! having a pre-determined stakeholder group come to consensus on how much should be paid. +//! +//! A group of `Tippers` is determined through the config `Config`. After half of these have declared +//! some amount that they believe a particular reported reason deserves, then a countdown period is +//! entered where any remaining members can declare their tip amounts also. After the close of the +//! countdown period, the median of all declared tips is paid to the reported beneficiary, along +//! with any finders fee, in case of a public (and bonded) original report. +//! +//! +//! ### Terminology +//! +//! Tipping protocol: +//! - **Tipping:** The process of gathering declarations of amounts to tip and taking the median +//! amount to be transferred from the treasury to a beneficiary account. +//! - **Tip Reason:** The reason for a tip; generally a URL which embodies or explains why a +//! particular individual (identified by an account ID) is worthy of a recognition by the +//! treasury. +//! - **Finder:** The original public reporter of some reason for tipping. +//! - **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip, +//! rather than the main beneficiary. +//! +//! ## Interface +//! +//! ### Dispatchable Functions +//! +//! Tipping protocol: +//! - `report_awesome` - Report something worthy of a tip and register for a finders fee. +//! - `retract_tip` - Retract a previous (finders fee registered) report. +//! - `tip_new` - Report an item worthy of a tip and declare a specific amount to tip. +//! - `tip` - Declare or redeclare an amount to tip for a particular reason. +//! - `close_tip` - Close and pay out a tip. 
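
Editorial aside (not part of the patch): the payout rule described above, take the median of the declared tips and then carve out the finder's fee, can be illustrated with a small, self-contained sketch. The helper names `median_tip` and `split_finders_fee` are hypothetical and use plain `u64` balances; the pallet's actual logic lives in `payout_tip` further down in this file.

// Median of the declared tip values; mirrors `tips[tips.len() / 2]` after sorting.
fn median_tip(mut declared: Vec<u64>) -> u64 {
    declared.sort();
    declared[declared.len() / 2]
}

// Split a payout into (finder's share, beneficiary's share) for a fee given in percent,
// mirroring `T::TipFindersFee::get() * payout` in `payout_tip`.
fn split_finders_fee(payout: u64, finders_fee_percent: u64) -> (u64, u64) {
    let finder_share = payout * finders_fee_percent / 100;
    (finder_share, payout - finder_share)
}

fn main() {
    // Three tippers declare 10, 10 and 1_000_000: the median payout is 10.
    let payout = median_tip(vec![10, 10, 1_000_000]);
    // With a 20% finders fee the finder receives 2 and the beneficiary 8,
    // the same 2 / 8 split exercised by `report_awesome_and_tip_works` in tests.rs.
    assert_eq!(split_finders_fee(payout, 20), (2, 8));
}
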
+ +#![cfg_attr(not(feature = "std"), no_std)] + +mod tests; +mod benchmarking; +pub mod weights; + +use sp_std::if_std; + +use sp_std::prelude::*; +use frame_support::{decl_module, decl_storage, decl_event, ensure, decl_error, Parameter}; +use frame_support::traits::{ + Currency, Get, ExistenceRequirement::{KeepAlive}, + ReservableCurrency +}; + +use sp_runtime::{ Percent, RuntimeDebug, traits::{ + Zero, AccountIdConversion, Hash, BadOrigin +}}; + +use frame_support::traits::{Contains, ContainsLengthBound}; +use codec::{Encode, Decode}; +use frame_system::{self as system, ensure_signed}; +pub use weights::WeightInfo; + +pub type BalanceOf = pallet_treasury::BalanceOf; +pub type NegativeImbalanceOf = pallet_treasury::NegativeImbalanceOf; + +pub trait Config: frame_system::Config + pallet_treasury::Config { + /// Maximum acceptable reason length. + type MaximumReasonLength: Get; + + /// The amount held on deposit per byte within the tip report reason or bounty description. + type DataDepositPerByte: Get>; + + /// Origin from which tippers must come. + /// + /// `ContainsLengthBound::max_len` must be cost free (i.e. no storage read or heavy operation). + type Tippers: Contains + ContainsLengthBound; + + /// The period for which a tip remains open after is has achieved threshold tippers. + type TipCountdown: Get; + + /// The percent of the final tip which goes to the original reporter of the tip. + type TipFindersFee: Get; + + /// The amount held on deposit for placing a tip report. + type TipReportDepositBase: Get>; + + /// The overarching event type. + type Event: From> + Into<::Event>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; +} + +/// An open tipping "motion". Retains all details of a tip including information on the finder +/// and the members who have voted. +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] +pub struct OpenTip< + AccountId: Parameter, + Balance: Parameter, + BlockNumber: Parameter, + Hash: Parameter, +> { + /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be + /// sensible. + reason: Hash, + /// The account to be tipped. + who: AccountId, + /// The account who began this tip. + finder: AccountId, + /// The amount held on deposit for this tip. + deposit: Balance, + /// The block number at which this tip will close if `Some`. If `None`, then no closing is + /// scheduled. + closes: Option, + /// The members who have voted for this tip. Sorted by AccountId. + tips: Vec<(AccountId, Balance)>, + /// Whether this tip should result in the finder taking a fee. + finders_fee: bool, +} + +// Note :: For backward compatability reasons, +// pallet-tips uses Treasury for storage. +// This is temporary solution, soon will get replaced with +// Own storage identifier. +decl_storage! { + trait Store for Module as Treasury { + + /// TipsMap that are not yet completed. Keyed by the hash of `(reason, who)` from the value. + /// This has the insecure enumerable hash function since the key itself is already + /// guaranteed to be a secure hash. + pub Tips get(fn tips): + map hasher(twox_64_concat) T::Hash + => Option, T::BlockNumber, T::Hash>>; + + /// Simple preimage lookup from the reason's hash to the original data. Again, has an + /// insecure enumerable hash since the key is guaranteed to be the result of a secure hash. 
+ pub Reasons get(fn reasons): map hasher(identity) T::Hash => Option<Vec<u8>>;
+
+ }
+}
+
+decl_event!(
+ pub enum Event<T>
+ where
+ Balance = BalanceOf<T>,
+ <T as frame_system::Config>::AccountId,
+ <T as frame_system::Config>::Hash,
+ {
+ /// A new tip suggestion has been opened. \[tip_hash\]
+ NewTip(Hash),
+ /// A tip suggestion has reached threshold and is closing. \[tip_hash\]
+ TipClosing(Hash),
+ /// A tip suggestion has been closed. \[tip_hash, who, payout\]
+ TipClosed(Hash, AccountId, Balance),
+ /// A tip suggestion has been retracted. \[tip_hash\]
+ TipRetracted(Hash),
+ }
+);
+
+decl_error! {
+ /// Error for the tips module.
+ pub enum Error for Module<T: Config> {
+ /// The reason given is just too big.
+ ReasonTooBig,
+ /// The tip was already found/started.
+ AlreadyKnown,
+ /// The tip hash is unknown.
+ UnknownTip,
+ /// The account attempting to retract the tip is not the finder of the tip.
+ NotFinder,
+ /// The tip cannot be claimed/closed because there are not enough tippers yet.
+ StillOpen,
+ /// The tip cannot be claimed/closed because it's still in the countdown period.
+ Premature,
+ }
+}
+
+decl_module! {
+ pub struct Module<T: Config>
+ for enum Call
+ where origin: T::Origin
+ {
+
+ /// The period for which a tip remains open after it has achieved threshold tippers.
+ const TipCountdown: T::BlockNumber = T::TipCountdown::get();
+
+ /// The percent of the final tip which goes to the original reporter of the tip.
+ const TipFindersFee: Percent = T::TipFindersFee::get();
+
+ /// The amount held on deposit for placing a tip report.
+ const TipReportDepositBase: BalanceOf<T> = T::TipReportDepositBase::get();
+
+ /// The amount held on deposit per byte within the tip report reason.
+ const DataDepositPerByte: BalanceOf<T> = T::DataDepositPerByte::get();
+
+ /// Maximum acceptable reason length.
+ const MaximumReasonLength: u32 = T::MaximumReasonLength::get();
+
+ type Error = Error<T>;
+
+ fn deposit_event() = default;
+
+ /// Report something `reason` that deserves a tip and claim any eventual finder's fee.
+ ///
+ /// The dispatch origin for this call must be _Signed_.
+ ///
+ /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as
+ /// `DataDepositPerByte` for each byte in `reason`.
+ ///
+ /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be
+ /// a UTF-8-encoded URL.
+ /// - `who`: The account which should be credited for the tip.
+ ///
+ /// Emits `NewTip` if successful.
+ ///
+ /// #
+ /// - Complexity: `O(R)` where `R` is the length of `reason`.
+ /// - encoding and hashing of 'reason' + /// - DbReads: `Reasons`, `Tips` + /// - DbWrites: `Reasons`, `Tips` + /// # + #[weight = ::WeightInfo::report_awesome(reason.len() as u32)] + fn report_awesome(origin, reason: Vec, who: T::AccountId) { + let finder = ensure_signed(origin)?; + + ensure!(reason.len() <= T::MaximumReasonLength::get() as usize, Error::::ReasonTooBig); + + let reason_hash = T::Hashing::hash(&reason[..]); + ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); + let hash = T::Hashing::hash_of(&(&reason_hash, &who)); + ensure!(!Tips::::contains_key(&hash), Error::::AlreadyKnown); + + let deposit = T::TipReportDepositBase::get() + + T::DataDepositPerByte::get() * (reason.len() as u32).into(); + T::Currency::reserve(&finder, deposit)?; + + Reasons::::insert(&reason_hash, &reason); + let tip = OpenTip { + reason: reason_hash, + who, + finder, + deposit, + closes: None, + tips: vec![], + finders_fee: true + }; + Tips::::insert(&hash, tip); + Self::deposit_event(RawEvent::NewTip(hash)); + } + + /// Retract a prior tip-report from `report_awesome`, and cancel the process of tipping. + /// + /// If successful, the original deposit will be unreserved. + /// + /// The dispatch origin for this call must be _Signed_ and the tip identified by `hash` + /// must have been reported by the signing account through `report_awesome` (and not + /// through `tip_new`). + /// + /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed + /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. + /// + /// Emits `TipRetracted` if successful. + /// + /// # + /// - Complexity: `O(1)` + /// - Depends on the length of `T::Hash` which is fixed. + /// - DbReads: `Tips`, `origin account` + /// - DbWrites: `Reasons`, `Tips`, `origin account` + /// # + #[weight = ::WeightInfo::retract_tip()] + fn retract_tip(origin, hash: T::Hash) { + let who = ensure_signed(origin)?; + let tip = Tips::::get(&hash).ok_or(Error::::UnknownTip)?; + ensure!(tip.finder == who, Error::::NotFinder); + + Reasons::::remove(&tip.reason); + Tips::::remove(&hash); + if !tip.deposit.is_zero() { + let _ = T::Currency::unreserve(&who, tip.deposit); + } + Self::deposit_event(RawEvent::TipRetracted(hash)); + } + + /// Give a tip for something new; no finder's fee will be taken. + /// + /// The dispatch origin for this call must be _Signed_ and the signing account must be a + /// member of the `Tippers` set. + /// + /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be + /// a UTF-8-encoded URL. + /// - `who`: The account which should be credited for the tip. + /// - `tip_value`: The amount of tip that the sender would like to give. The median tip + /// value of active tippers will be given to the `who`. + /// + /// Emits `NewTip` if successful. + /// + /// # + /// - Complexity: `O(R + T)` where `R` length of `reason`, `T` is the number of tippers. + /// - `O(T)`: decoding `Tipper` vec of length `T` + /// `T` is charged as upper bound given by `ContainsLengthBound`. + /// The actual cost depends on the implementation of `T::Tippers`. 
+ /// - `O(R)`: hashing and encoding of reason of length `R` + /// - DbReads: `Tippers`, `Reasons` + /// - DbWrites: `Reasons`, `Tips` + /// # + #[weight = ::WeightInfo::tip_new(reason.len() as u32, T::Tippers::max_len() as u32)] + fn tip_new(origin, reason: Vec, who: T::AccountId, #[compact] tip_value: BalanceOf) { + let tipper = ensure_signed(origin)?; + ensure!(T::Tippers::contains(&tipper), BadOrigin); + let reason_hash = T::Hashing::hash(&reason[..]); + ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); + let hash = T::Hashing::hash_of(&(&reason_hash, &who)); + + Reasons::::insert(&reason_hash, &reason); + Self::deposit_event(RawEvent::NewTip(hash.clone())); + let tips = vec![(tipper.clone(), tip_value)]; + let tip = OpenTip { + reason: reason_hash, + who, + finder: tipper, + deposit: Zero::zero(), + closes: None, + tips, + finders_fee: false, + }; + Tips::::insert(&hash, tip); + } + + /// Declare a tip value for an already-open tip. + /// + /// The dispatch origin for this call must be _Signed_ and the signing account must be a + /// member of the `Tippers` set. + /// + /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed + /// as the hash of the tuple of the hash of the original tip `reason` and the beneficiary + /// account ID. + /// - `tip_value`: The amount of tip that the sender would like to give. The median tip + /// value of active tippers will be given to the `who`. + /// + /// Emits `TipClosing` if the threshold of tippers has been reached and the countdown period + /// has started. + /// + /// # + /// - Complexity: `O(T)` where `T` is the number of tippers. + /// decoding `Tipper` vec of length `T`, insert tip and check closing, + /// `T` is charged as upper bound given by `ContainsLengthBound`. + /// The actual cost depends on the implementation of `T::Tippers`. + /// + /// Actually weight could be lower as it depends on how many tips are in `OpenTip` but it + /// is weighted as if almost full i.e of length `T-1`. + /// - DbReads: `Tippers`, `Tips` + /// - DbWrites: `Tips` + /// # + #[weight = ::WeightInfo::tip(T::Tippers::max_len() as u32)] + fn tip(origin, hash: T::Hash, #[compact] tip_value: BalanceOf) { + let tipper = ensure_signed(origin)?; + ensure!(T::Tippers::contains(&tipper), BadOrigin); + + let mut tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; + if Self::insert_tip_and_check_closing(&mut tip, tipper, tip_value) { + Self::deposit_event(RawEvent::TipClosing(hash.clone())); + } + Tips::::insert(&hash, tip); + } + + /// Close and payout a tip. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// The tip identified by `hash` must have finished its countdown period. + /// + /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed + /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. + /// + /// # + /// - Complexity: `O(T)` where `T` is the number of tippers. + /// decoding `Tipper` vec of length `T`. + /// `T` is charged as upper bound given by `ContainsLengthBound`. + /// The actual cost depends on the implementation of `T::Tippers`. 
+ /// - DbReads: `Tips`, `Tippers`, `tip finder` + /// - DbWrites: `Reasons`, `Tips`, `Tippers`, `tip finder` + /// # + #[weight = ::WeightInfo::close_tip(T::Tippers::max_len() as u32)] + fn close_tip(origin, hash: T::Hash) { + ensure_signed(origin)?; + + let tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; + let n = tip.closes.as_ref().ok_or(Error::::StillOpen)?; + ensure!(system::Module::::block_number() >= *n, Error::::Premature); + // closed. + Reasons::::remove(&tip.reason); + Tips::::remove(hash); + Self::payout_tip(hash, tip); + } + } +} + +impl Module { + // Add public immutables and private mutables. + + /// The account ID of the treasury pot. + /// + /// This actually does computation. If you need to keep using it, then make sure you cache the + /// value and only call this once. + pub fn account_id() -> T::AccountId { + T::ModuleId::get().into_account() + } + + /// Given a mutable reference to an `OpenTip`, insert the tip into it and check whether it + /// closes, if so, then deposit the relevant event and set closing accordingly. + /// + /// `O(T)` and one storage access. + fn insert_tip_and_check_closing( + tip: &mut OpenTip, T::BlockNumber, T::Hash>, + tipper: T::AccountId, + tip_value: BalanceOf, + ) -> bool { + match tip.tips.binary_search_by_key(&&tipper, |x| &x.0) { + Ok(pos) => tip.tips[pos] = (tipper, tip_value), + Err(pos) => tip.tips.insert(pos, (tipper, tip_value)), + } + Self::retain_active_tips(&mut tip.tips); + let threshold = (T::Tippers::count() + 1) / 2; + if tip.tips.len() >= threshold && tip.closes.is_none() { + tip.closes = Some(system::Module::::block_number() + T::TipCountdown::get()); + true + } else { + false + } + } + + /// Remove any non-members of `Tippers` from a `tips` vector. `O(T)`. + fn retain_active_tips(tips: &mut Vec<(T::AccountId, BalanceOf)>) { + let members = T::Tippers::sorted_members(); + let mut members_iter = members.iter(); + let mut member = members_iter.next(); + tips.retain(|(ref a, _)| loop { + match member { + None => break false, + Some(m) if m > a => break false, + Some(m) => { + member = members_iter.next(); + if m < a { + continue + } else { + break true; + } + } + } + }); + } + + /// Execute the payout of a tip. + /// + /// Up to three balance operations. + /// Plus `O(T)` (`T` is Tippers length). + fn payout_tip(hash: T::Hash, tip: OpenTip, T::BlockNumber, T::Hash>) { + let mut tips = tip.tips; + Self::retain_active_tips(&mut tips); + tips.sort_by_key(|i| i.1); + + let treasury = Self::account_id(); + let max_payout = pallet_treasury::Module::::pot(); + + let mut payout = tips[tips.len() / 2].1.min(max_payout); + if !tip.deposit.is_zero() { + let _ = T::Currency::unreserve(&tip.finder, tip.deposit); + } + + if tip.finders_fee && tip.finder != tip.who { + // pay out the finder's fee. + let finders_fee = T::TipFindersFee::get() * payout; + payout -= finders_fee; + // this should go through given we checked it's at most the free balance, but still + // we only make a best-effort. + let _ = T::Currency::transfer(&treasury, &tip.finder, finders_fee, KeepAlive); + } + + // same as above: best-effort only. + let _ = T::Currency::transfer(&treasury, &tip.who, payout, KeepAlive); + Self::deposit_event(RawEvent::TipClosed(hash, tip.who, payout)); + } + + pub fn migrate_retract_tip_for_tip_new() { + /// An open tipping "motion". Retains all details of a tip including information on the finder + /// and the members who have voted. 
+ #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] + pub struct OldOpenTip< + AccountId: Parameter, + Balance: Parameter, + BlockNumber: Parameter, + Hash: Parameter, + > { + /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be + /// sensible. + reason: Hash, + /// The account to be tipped. + who: AccountId, + /// The account who began this tip and the amount held on deposit. + finder: Option<(AccountId, Balance)>, + /// The block number at which this tip will close if `Some`. If `None`, then no closing is + /// scheduled. + closes: Option, + /// The members who have voted for this tip. Sorted by AccountId. + tips: Vec<(AccountId, Balance)>, + } + + use frame_support::{Twox64Concat, migration::StorageKeyIterator}; + + if_std! { + println!("Inside migrate_retract_tip_for_tip_new()!"); + } + + for (hash, old_tip) in StorageKeyIterator::< + T::Hash, + OldOpenTip, T::BlockNumber, T::Hash>, + Twox64Concat, + >::new(b"Treasury", b"Tips").drain() + { + + if_std! { + println!("Inside loop migrate_retract_tip_for_tip_new()!"); + } + + let (finder, deposit, finders_fee) = match old_tip.finder { + Some((finder, deposit)) => { + if_std! { + // This code is only being compiled and executed when the `std` feature is enabled. + println!("OK case!"); + println!("value is: {:#?},{:#?}", finder, deposit); + } + (finder, deposit, true) + }, + None => { + if_std! { + // This code is only being compiled and executed when the `std` feature is enabled. + println!("None case!"); + // println!("value is: {:#?},{:#?}", T::AccountId::default(), Zero::zero()); + } + (T::AccountId::default(), Zero::zero(), false) + }, + }; + let new_tip = OpenTip { + reason: old_tip.reason, + who: old_tip.who, + finder, + deposit, + closes: old_tip.closes, + tips: old_tip.tips, + finders_fee + }; + Tips::::insert(hash, new_tip) + } + + if_std! { + println!("Exit migrate_retract_tip_for_tip_new()!"); + } + + } +} diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..e6f9cd4e66b7751695c9e144b26d5448085ffb5b --- /dev/null +++ b/frame/tips/src/tests.rs @@ -0,0 +1,465 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Treasury pallet tests. + +#![cfg(test)] + +use super::*; +use std::cell::RefCell; +use frame_support::{ + assert_noop, assert_ok, impl_outer_origin, parameter_types, weights::Weight, + impl_outer_event, traits::{Contains} +}; +use sp_runtime::{Permill}; +use sp_core::H256; +use sp_runtime::{ + Perbill, ModuleId, + testing::Header, + traits::{BlakeTwo256, IdentityLookup, BadOrigin}, +}; + +impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} +} + +mod tips { + // Re-export needed for `impl_outer_event!`. + pub use crate::*; +} + +impl_outer_event! 
{ + pub enum Event for Test { + system, + pallet_balances, + pallet_treasury, + tips, + } +} + +#[derive(Clone, Eq, PartialEq)] +pub struct Test; +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} +impl frame_system::Config for Test { + type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); +} +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} +impl pallet_balances::Config for Test { + type MaxLocks = (); + type Balance = u64; + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} +thread_local! { + static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); +} +pub struct TenToFourteen; +impl Contains for TenToFourteen { + fn sorted_members() -> Vec { + TEN_TO_FOURTEEN.with(|v| { + v.borrow().clone() + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn add(new: &u128) { + TEN_TO_FOURTEEN.with(|v| { + let mut members = v.borrow_mut(); + members.push(*new); + members.sort(); + }) + } +} +impl ContainsLengthBound for TenToFourteen { + fn max_len() -> usize { + TEN_TO_FOURTEEN.with(|v| v.borrow().len()) + } + fn min_len() -> usize { 0 } +} +parameter_types! { + pub const ProposalBond: Permill = Permill::from_percent(5); + pub const ProposalBondMinimum: u64 = 1; + pub const SpendPeriod: u64 = 2; + pub const Burn: Permill = Permill::from_percent(50); + pub const DataDepositPerByte: u64 = 1; + pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); + pub const MaximumReasonLength: u32 = 16384; +} +impl pallet_treasury::Config for Test { + type ModuleId = TreasuryModuleId; + type Currency = pallet_balances::Module; + type ApproveOrigin = frame_system::EnsureRoot; + type RejectOrigin = frame_system::EnsureRoot; + type Event = Event; + type OnSlash = (); + type ProposalBond = ProposalBond; + type ProposalBondMinimum = ProposalBondMinimum; + type SpendPeriod = SpendPeriod; + type Burn = Burn; + type BurnDestination = (); // Just gets burned. + type WeightInfo = (); + type SpendFunds = (); +} +parameter_types! 
{ + pub const TipCountdown: u64 = 1; + pub const TipFindersFee: Percent = Percent::from_percent(20); + pub const TipReportDepositBase: u64 = 1; +} +impl Config for Test { + type MaximumReasonLength = MaximumReasonLength; + type Tippers = TenToFourteen; + type TipCountdown = TipCountdown; + type TipFindersFee = TipFindersFee; + type TipReportDepositBase = TipReportDepositBase; + type DataDepositPerByte = DataDepositPerByte; + type Event = Event; + type WeightInfo = (); +} +type System = frame_system::Module; +type Balances = pallet_balances::Module; +type Treasury = pallet_treasury::Module; +type TipsModTestInst = Module; + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig::{ + // Total issuance will be 200 with treasury account initialized at ED. + balances: vec![(0, 100), (1, 98), (2, 1)], + }.assimilate_storage(&mut t).unwrap(); + pallet_treasury::GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); + t.into() +} + +fn last_event() -> RawEvent { + System::events().into_iter().map(|r| r.event) + .filter_map(|e| { + if let Event::tips(inner) = e { Some(inner) } else { None } + }) + .last() + .unwrap() +} + +#[test] +fn genesis_config_works() { + new_test_ext().execute_with(|| { + assert_eq!(Treasury::pot(), 0); + assert_eq!(Treasury::proposal_count(), 0); + }); +} + +fn tip_hash() -> H256 { + BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 3u128)) +} + +#[test] +fn tip_new_cannot_be_used_twice() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(TipsModTestInst::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + assert_noop!( + TipsModTestInst::tip_new(Origin::signed(11), b"awesome.dot".to_vec(), 3, 10), + Error::::AlreadyKnown + ); + }); +} + +#[test] +fn report_awesome_and_tip_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(TipsModTestInst::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); + assert_eq!(Balances::reserved_balance(0), 12); + assert_eq!(Balances::free_balance(0), 88); + + // other reports don't count. 
+ assert_noop!( + TipsModTestInst::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3), + Error::::AlreadyKnown + ); + + let h = tip_hash(); + assert_ok!(TipsModTestInst::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(TipsModTestInst::tip(Origin::signed(9), h.clone(), 10), BadOrigin); + System::set_block_number(2); + assert_ok!(TipsModTestInst::close_tip(Origin::signed(100), h.into())); + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 102); + assert_eq!(Balances::free_balance(3), 8); + }); +} + +#[test] +fn report_awesome_from_beneficiary_and_tip_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(TipsModTestInst::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 0)); + assert_eq!(Balances::reserved_balance(0), 12); + assert_eq!(Balances::free_balance(0), 88); + let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u128)); + assert_ok!(TipsModTestInst::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 10)); + System::set_block_number(2); + assert_ok!(TipsModTestInst::close_tip(Origin::signed(100), h.into())); + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 110); + }); +} + +#[test] +fn close_tip_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_ok!(TipsModTestInst::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + + let h = tip_hash(); + + assert_eq!(last_event(), RawEvent::NewTip(h)); + + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10)); + + assert_noop!(TipsModTestInst::close_tip(Origin::signed(0), h.into()), Error::::StillOpen); + + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 10)); + + assert_eq!(last_event(), RawEvent::TipClosing(h)); + + assert_noop!(TipsModTestInst::close_tip(Origin::signed(0), h.into()), Error::::Premature); + + System::set_block_number(2); + assert_noop!(TipsModTestInst::close_tip(Origin::none(), h.into()), BadOrigin); + assert_ok!(TipsModTestInst::close_tip(Origin::signed(0), h.into())); + assert_eq!(Balances::free_balance(3), 10); + + assert_eq!(last_event(), RawEvent::TipClosed(h, 3, 10)); + + assert_noop!(TipsModTestInst::close_tip(Origin::signed(100), h.into()), Error::::UnknownTip); + }); +} + +#[test] +fn retract_tip_works() { + new_test_ext().execute_with(|| { + // with report awesome + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(TipsModTestInst::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); + let h = tip_hash(); + assert_ok!(TipsModTestInst::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(TipsModTestInst::retract_tip(Origin::signed(10), h.clone()), Error::::NotFinder); + assert_ok!(TipsModTestInst::retract_tip(Origin::signed(0), h.clone())); + System::set_block_number(2); + assert_noop!(TipsModTestInst::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); + + // with tip new + 
Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(TipsModTestInst::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + let h = tip_hash(); + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(TipsModTestInst::retract_tip(Origin::signed(0), h.clone()), Error::::NotFinder); + assert_ok!(TipsModTestInst::retract_tip(Origin::signed(10), h.clone())); + System::set_block_number(2); + assert_noop!(TipsModTestInst::close_tip(Origin::signed(10), h.into()), Error::::UnknownTip); + }); +} + +#[test] +fn tip_median_calculation_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(TipsModTestInst::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 0)); + let h = tip_hash(); + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 1000000)); + System::set_block_number(2); + assert_ok!(TipsModTestInst::close_tip(Origin::signed(0), h.into())); + assert_eq!(Balances::free_balance(3), 10); + }); +} + +#[test] +fn tip_changing_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(TipsModTestInst::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10000)); + let h = tip_hash(); + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10000)); + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 10000)); + assert_ok!(TipsModTestInst::tip(Origin::signed(13), h.clone(), 0)); + assert_ok!(TipsModTestInst::tip(Origin::signed(14), h.clone(), 0)); + assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 1000)); + assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 100)); + assert_ok!(TipsModTestInst::tip(Origin::signed(10), h.clone(), 10)); + System::set_block_number(2); + assert_ok!(TipsModTestInst::close_tip(Origin::signed(0), h.into())); + assert_eq!(Balances::free_balance(3), 10); + }); +} + +#[test] +fn test_last_reward_migration() { + use sp_storage::Storage; + + let mut s = Storage::default(); + + #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] + pub struct OldOpenTip< + AccountId: Parameter, + Balance: Parameter, + BlockNumber: Parameter, + Hash: Parameter, + > { + /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be + /// sensible. + reason: Hash, + /// The account to be tipped. + who: AccountId, + /// The account who began this tip and the amount held on deposit. + finder: Option<(AccountId, Balance)>, + /// The block number at which this tip will close if `Some`. If `None`, then no closing is + /// scheduled. + closes: Option, + /// The members who have voted for this tip. Sorted by AccountId. 
+ tips: Vec<(AccountId, Balance)>, + } + + let reason1 = BlakeTwo256::hash(b"reason1"); + let hash1 = BlakeTwo256::hash_of(&(reason1, 10u64)); + + let old_tip_finder = OldOpenTip:: { + reason: reason1, + who: 10, + finder: Some((20, 30)), + closes: Some(13), + tips: vec![(40, 50), (60, 70)] + }; + + let reason2 = BlakeTwo256::hash(b"reason2"); + let hash2 = BlakeTwo256::hash_of(&(reason2, 20u64)); + + let old_tip_no_finder = OldOpenTip:: { + reason: reason2, + who: 20, + finder: None, + closes: Some(13), + tips: vec![(40, 50), (60, 70)] + }; + + let data = vec![ + ( + Tips::::hashed_key_for(hash1), + old_tip_finder.encode().to_vec() + ), + ( + Tips::::hashed_key_for(hash2), + old_tip_no_finder.encode().to_vec() + ), + ]; + + s.top = data.into_iter().collect(); + + println!("Executing the test!"); + + sp_io::TestExternalities::new(s).execute_with(|| { + + println!("Calling migrate_retract_tip_for_tip_new()!"); + + TipsModTestInst::migrate_retract_tip_for_tip_new(); + + // Test w/ finder + assert_eq!( + Tips::::get(hash1), + Some(OpenTip { + reason: reason1, + who: 10, + finder: 20, + deposit: 30, + closes: Some(13), + tips: vec![(40, 50), (60, 70)], + finders_fee: true, + }) + ); + + // Test w/o finder + assert_eq!( + Tips::::get(hash2), + Some(OpenTip { + reason: reason2, + who: 20, + finder: Default::default(), + deposit: 0, + closes: Some(13), + tips: vec![(40, 50), (60, 70)], + finders_fee: false, + }) + ); + }); +} + +#[test] +fn genesis_funding_works() { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let initial_funding = 100; + pallet_balances::GenesisConfig::{ + // Total issuance will be 200 with treasury account initialized with 100. + balances: vec![(0, 100), (Treasury::account_id(), initial_funding)], + }.assimilate_storage(&mut t).unwrap(); + pallet_treasury::GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); + let mut t: sp_io::TestExternalities = t.into(); + + t.execute_with(|| { + assert_eq!(Balances::free_balance(Treasury::account_id()), initial_funding); + assert_eq!(Treasury::pot(), initial_funding - Balances::minimum_balance()); + }); +} diff --git a/frame/tips/src/weights.rs b/frame/tips/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..ad2d3104cafe5168872a5be50ef537ac08724fd1 --- /dev/null +++ b/frame/tips/src/weights.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_tips +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 +//! DATE: 2020-12-16, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// ./target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_tips +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/tips/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_tips. +pub trait WeightInfo { + fn report_awesome(r: u32, ) -> Weight; + fn retract_tip() -> Weight; + fn tip_new(r: u32, t: u32, ) -> Weight; + fn tip(t: u32, ) -> Weight; + fn close_tip(t: u32, ) -> Weight; +} + +/// Weights for pallet_tips using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn report_awesome(r: u32, ) -> Weight { + (74_814_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn retract_tip() -> Weight { + (62_962_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn tip_new(r: u32, t: u32, ) -> Weight { + (48_132_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((155_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn tip(t: u32, ) -> Weight { + (36_168_000 as Weight) + // Standard Error: 1_000 + .saturating_add((695_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn close_tip(t: u32, ) -> Weight { + (119_313_000 as Weight) + // Standard Error: 1_000 + .saturating_add((372_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn report_awesome(r: u32, ) -> Weight { + (74_814_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn retract_tip() -> Weight { + (62_962_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn tip_new(r: u32, t: u32, ) -> Weight { + (48_132_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((155_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn tip(t: u32, ) -> Weight { + (36_168_000 as Weight) + // Standard Error: 1_000 + .saturating_add((695_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn close_tip(t: u32, ) 
-> Weight { + (119_313_000 as Weight) + // Standard Error: 1_000 + .saturating_add((372_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } +} diff --git a/frame/transaction-payment/README.md b/frame/transaction-payment/README.md index 10ad9579e92b7e66b8a87af0e9962cfc4ee77ae8..7e95677a1b272fdbcaa6b53a8c0e5850be978d51 100644 --- a/frame/transaction-payment/README.md +++ b/frame/transaction-payment/README.md @@ -8,9 +8,9 @@ transaction to be included. This includes: chance to be included by the transaction queue. Additionally, this module allows one to configure: - - The mapping between one unit of weight to one unit of fee via [`Trait::WeightToFee`]. + - The mapping between one unit of weight to one unit of fee via [`Config::WeightToFee`]. - A means of updating the fee for the next block, via defining a multiplier, based on the final state of the chain at the end of the previous block. This can be configured via - [`Trait::FeeMultiplierUpdate`] + [`Config::FeeMultiplierUpdate`] License: Apache-2.0 \ No newline at end of file diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 751aa57da0f80542936a1f3b2ffbb1e23a2fb58b..e530380dfbb48801a241545b2c8ad4671f49ff41 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -25,11 +25,11 @@ //! chance to be included by the transaction queue. //! //! Additionally, this module allows one to configure: -//! - The mapping between one unit of weight to one unit of fee via [`Trait::WeightToFee`]. +//! - The mapping between one unit of weight to one unit of fee via [`Config::WeightToFee`]. //! - A means of updating the fee for the next block, via defining a multiplier, based on the //! final state of the chain at the end of the previous block. This can be configured via -//! [`Trait::FeeMultiplierUpdate`] -//! - How the fees are paid via [`Trait::OnChargeTransaction`]. +//! [`Config::FeeMultiplierUpdate`] +//! - How the fees are paid via [`Config::OnChargeTransaction`]. #![cfg_attr(not(feature = "std"), no_std)] @@ -40,7 +40,7 @@ use frame_support::{ traits::Get, weights::{ Weight, DispatchInfo, PostDispatchInfo, GetDispatchInfo, Pays, WeightToFeePolynomial, - WeightToFeeCoefficient, + WeightToFeeCoefficient, DispatchClass, }, dispatch::DispatchResult, }; @@ -63,7 +63,7 @@ pub use payment::*; pub type Multiplier = FixedU128; type BalanceOf = - <::OnChargeTransaction as OnChargeTransaction>::Balance; + <::OnChargeTransaction as OnChargeTransaction>::Balance; /// A struct to update the weight multiplier per block. It implements `Convert`, meaning that it can convert the previous multiplier to the next one. This should @@ -109,7 +109,7 @@ type BalanceOf = /// Meaning that fees can change by around ~23% per day, given extreme congestion. /// /// More info can be found at: -/// https://w3f-research.readthedocs.io/en/latest/polkadot/Token%20Economics.html +/// pub struct TargetedFeeAdjustment(sp_std::marker::PhantomData<(T, S, V, M)>); /// Something that can convert the current multiplier to the next one. 
@@ -135,7 +135,7 @@ impl MultiplierUpdate for () { } impl MultiplierUpdate for TargetedFeeAdjustment - where T: frame_system::Trait, S: Get, V: Get, M: Get, + where T: frame_system::Config, S: Get, V: Get, M: Get, { fn min() -> Multiplier { M::get() @@ -149,7 +149,7 @@ impl MultiplierUpdate for TargetedFeeAdjustment } impl Convert for TargetedFeeAdjustment - where T: frame_system::Trait, S: Get, V: Get, M: Get, + where T: frame_system::Config, S: Get, V: Get, M: Get, { fn convert(previous: Multiplier) -> Multiplier { // Defensive only. The multiplier in storage should always be at most positive. Nonetheless @@ -158,14 +158,14 @@ impl Convert for TargetedFeeAdjustment::AvailableBlockRatio::get() * - ::MaximumBlockWeight::get(); - let normal_block_weight = - >::block_weight() - .get(frame_support::weights::DispatchClass::Normal) - .min(normal_max_weight); + let normal_max_weight = weights.get(DispatchClass::Normal).max_total + .unwrap_or_else(|| weights.max_block); + let current_block_weight = >::block_weight(); + let normal_block_weight = *current_block_weight + .get(DispatchClass::Normal) + .min(&normal_max_weight); let s = S::get(); let v = V::get(); @@ -213,7 +213,7 @@ impl Default for Releases { } } -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// Handler for withdrawing, refunding and depositing the transaction fee. /// Transaction fees are withdrawn before the transaction is executed. /// After the transaction was executed the transaction weight can be @@ -233,7 +233,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as TransactionPayment { + trait Store for Module as TransactionPayment { pub NextFeeMultiplier get(fn next_fee_multiplier): Multiplier = Multiplier::saturating_from_integer(1); StorageVersion build(|_: &GenesisConfig| Releases::V2): Releases; @@ -241,7 +241,7 @@ decl_storage! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// The fee to be paid for making a transaction; the per-byte portion. const TransactionByteFee: BalanceOf = T::TransactionByteFee::get(); @@ -257,13 +257,13 @@ decl_module! { fn integrity_test() { // given weight == u64, we build multipliers from `diff` of two weight values, which can - // at most be MaximumBlockWeight. Make sure that this can fit in a multiplier without + // at most be maximum block weight. Make sure that this can fit in a multiplier without // loss. use sp_std::convert::TryInto; assert!( ::max_value() >= Multiplier::checked_from_integer( - ::MaximumBlockWeight::get().try_into().unwrap() + T::BlockWeights::get().max_block.try_into().unwrap() ).unwrap(), ); @@ -272,9 +272,11 @@ decl_module! { // that if we collapse to minimum, the trend will be positive with a weight value // which is 1% more than the target. let min_value = T::FeeMultiplierUpdate::min(); - let mut target = - T::FeeMultiplierUpdate::target() * - (T::AvailableBlockRatio::get() * T::MaximumBlockWeight::get()); + let mut target = T::FeeMultiplierUpdate::target() * + T::BlockWeights::get().get(DispatchClass::Normal).max_total.expect( + "Setting `max_total` for `Normal` dispatch class is not compatible with \ + `transaction-payment` pallet." + ); // add 1 percent; let addition = target / 100; @@ -285,7 +287,7 @@ decl_module! 
{ target += addition; sp_io::TestExternalities::new_empty().execute_with(|| { - >::set_block_limits(target, 0); + >::set_block_consumed_resources(target, 0); let next = T::FeeMultiplierUpdate::convert(min_value); assert!(next > min_value, "The minimum bound of the multiplier is too low. When \ block saturation is more than target by 1% and multiplier is minimal then \ @@ -296,7 +298,7 @@ decl_module! { } } -impl Module where +impl Module where BalanceOf: FixedPointOperand { /// Query the data that we know about the fee of a given `call`. @@ -357,7 +359,13 @@ impl Module where ) -> BalanceOf where T::Call: Dispatchable, { - Self::compute_fee_raw(len, info.weight, tip, info.pays_fee) + Self::compute_fee_raw( + len, + info.weight, + tip, + info.pays_fee, + info.class, + ) } /// Compute the actual post dispatch fee for a particular transaction. @@ -372,7 +380,13 @@ impl Module where ) -> BalanceOf where T::Call: Dispatchable, { - Self::compute_fee_raw(len, post_info.calc_actual_weight(info), tip, post_info.pays_fee(info)) + Self::compute_fee_raw( + len, + post_info.calc_actual_weight(info), + tip, + post_info.pays_fee(info), + info.class, + ) } fn compute_fee_raw( @@ -380,6 +394,7 @@ impl Module where weight: Weight, tip: BalanceOf, pays_fee: Pays, + class: DispatchClass, ) -> BalanceOf { if pays_fee == Pays::Yes { let len = >::from(len); @@ -394,7 +409,7 @@ impl Module where // final adjusted weight fee. let adjusted_weight_fee = multiplier.saturating_mul_int(unadjusted_weight_fee); - let base_fee = Self::weight_to_fee(T::ExtrinsicBaseWeight::get()); + let base_fee = Self::weight_to_fee(T::BlockWeights::get().get(class).base_extrinsic); base_fee .saturating_add(fixed_len_fee) .saturating_add(adjusted_weight_fee) @@ -407,13 +422,13 @@ impl Module where fn weight_to_fee(weight: Weight) -> BalanceOf { // cap the weight to the maximum defined in runtime, otherwise it will be the // `Bounded` maximum of its data type, which is not desired. - let capped_weight = weight.min(::MaximumBlockWeight::get()); + let capped_weight = weight.min(T::BlockWeights::get().max_block); T::WeightToFee::calc(&capped_weight) } } impl Convert> for Module where - T: Trait, + T: Config, BalanceOf: FixedPointOperand, { /// Compute the fee for the specified weight. @@ -429,9 +444,9 @@ impl Convert> for Module where /// Require the transactor pay for themselves and maybe include a tip to gain additional priority /// in the queue. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct ChargeTransactionPayment(#[codec(compact)] BalanceOf); +pub struct ChargeTransactionPayment(#[codec(compact)] BalanceOf); -impl ChargeTransactionPayment where +impl ChargeTransactionPayment where T::Call: Dispatchable, BalanceOf: Send + Sync + FixedPointOperand, { @@ -449,14 +464,14 @@ impl ChargeTransactionPayment where ) -> Result< ( BalanceOf, - <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, + <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, ), TransactionValidityError, > { let tip = self.0; let fee = Module::::compute_fee(len as u32, info, tip); - <::OnChargeTransaction as OnChargeTransaction>::withdraw_fee(who, call, info, fee, tip) + <::OnChargeTransaction as OnChargeTransaction>::withdraw_fee(who, call, info, fee, tip) .map(|i| (fee, i)) } @@ -471,14 +486,15 @@ impl ChargeTransactionPayment where /// that the transaction which consumes more resources (either length or weight) with the same /// `fee` ends up having lower priority. 
fn get_priority(len: usize, info: &DispatchInfoOf, final_fee: BalanceOf) -> TransactionPriority { - let weight_saturation = T::MaximumBlockWeight::get() / info.weight.max(1); - let len_saturation = T::MaximumBlockLength::get() as u64 / (len as u64).max(1); + let weight_saturation = T::BlockWeights::get().max_block / info.weight.max(1); + let max_block_length = *T::BlockLength::get().max.get(DispatchClass::Normal); + let len_saturation = max_block_length as u64 / (len as u64).max(1); let coefficient: BalanceOf = weight_saturation.min(len_saturation).saturated_into::>(); final_fee.saturating_mul(coefficient).saturated_into::() } } -impl sp_std::fmt::Debug for ChargeTransactionPayment { +impl sp_std::fmt::Debug for ChargeTransactionPayment { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "ChargeTransactionPayment<{:?}>", self.0) @@ -489,7 +505,7 @@ impl sp_std::fmt::Debug for ChargeTransactionPayment } } -impl SignedExtension for ChargeTransactionPayment where +impl SignedExtension for ChargeTransactionPayment where BalanceOf: Send + Sync + From + FixedPointOperand, T::Call: Dispatchable, { @@ -503,7 +519,7 @@ impl SignedExtension for ChargeTransactionPayment whe // who paid the fee Self::AccountId, // imbalance resulting from withdrawing the fee - <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, + <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, ); fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } @@ -571,9 +587,10 @@ mod tests { traits::{BlakeTwo256, IdentityLookup}, Perbill, }; + use std::cell::RefCell; use smallvec::smallvec; - const CALL: &::Call = + const CALL: &::Call = &Call::Balances(BalancesCall::transfer(2, 69)); impl_outer_dispatch! { @@ -598,18 +615,36 @@ mod tests { pub enum Origin for Runtime {} } + thread_local! { + static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(0); + } + + pub struct BlockWeights; + impl Get for BlockWeights { + fn get() -> frame_system::limits::BlockWeights { + frame_system::limits::BlockWeights::builder() + .base_block(0) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()).into(); + }) + .for_class(DispatchClass::non_mandatory(), |weights| { + weights.max_total = 1024.into(); + }) + .build_or_panic() + } + } + parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub static ExtrinsicBaseWeight: u64 = 0; pub static TransactionByteFee: u64 = 1; pub static WeightToFee: u64 = 1; } - impl frame_system::Trait for Runtime { + impl frame_system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -621,13 +656,6 @@ mod tests { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -640,7 +668,7 @@ mod tests { pub const ExistentialDeposit: u64 = 1; } - impl pallet_balances::Trait for Runtime { + impl pallet_balances::Config for Runtime { type Balance = u64; type Event = Event; type DustRemoval = (); @@ -663,7 +691,7 @@ mod tests { } } - impl Trait for Runtime { + impl Config for Runtime { type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; type WeightToFee = WeightToFee; @@ -841,7 +869,7 @@ mod tests { // fee will be proportional to what is the actual maximum weight in the runtime. assert_eq!( Balances::free_balance(&1), - (10000 - ::MaximumBlockWeight::get()) as u64 + (10000 - ::BlockWeights::get().max_block) as u64 ); }); } @@ -939,7 +967,7 @@ mod tests { partial_fee: 5 * 2 /* base * weight_fee */ + len as u64 /* len * 1 */ - + info.weight.min(MaximumBlockWeight::get()) as u64 * 2 * 3 / 2 /* weight */ + + info.weight.min(BlockWeights::get().max_block) as u64 * 2 * 3 / 2 /* weight */ }, ); diff --git a/frame/transaction-payment/src/payment.rs b/frame/transaction-payment/src/payment.rs index de39215b575be816d701db9a838e7a1dade5e300..f84b19d78c297d8cc1ce258b2b0e1f50d42d203b 100644 --- a/frame/transaction-payment/src/payment.rs +++ b/frame/transaction-payment/src/payment.rs @@ -1,5 +1,5 @@ ///! Traits and default implementation for paying transaction fees. -use crate::Trait; +use crate::Config; use codec::FullCodec; use frame_support::{ traits::{Currency, ExistenceRequirement, Get, Imbalance, OnUnbalanced, WithdrawReasons}, @@ -12,10 +12,10 @@ use sp_runtime::{ use sp_std::{fmt::Debug, marker::PhantomData}; type NegativeImbalanceOf = - ::AccountId>>::NegativeImbalance; + ::AccountId>>::NegativeImbalance; /// Handle withdrawing, refunding and depositing of transaction fees. -pub trait OnChargeTransaction { +pub trait OnChargeTransaction { /// The underlying integer type in which fees are calculated. type Balance: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default; type LiquidityInfo: Default; @@ -55,17 +55,17 @@ pub struct CurrencyAdapter(PhantomData<(C, OU)>); /// Default implementation for a Currency and an OnUnbalanced handler. 
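For the test runtime the old `MaximumBlockWeight`/`MaximumBlockLength`/`AvailableBlockRatio`/`ExtrinsicBaseWeight` parameters collapse into the `BlockWeights` builder shown above. Outside of tests a runtime would typically do the same with the convenience constructors; a sketch, assuming the `with_sensible_defaults`/`max_with_normal_ratio` helpers used by the node runtimes in this series (the numbers are illustrative):

```rust
use frame_support::{parameter_types, weights::constants::WEIGHT_PER_SECOND};
use sp_runtime::Perbill;

// Portion of block weight/length available to normal (non-operational) extrinsics.
const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);

parameter_types! {
    pub RuntimeBlockWeights: frame_system::limits::BlockWeights =
        frame_system::limits::BlockWeights::with_sensible_defaults(
            2 * WEIGHT_PER_SECOND,     // target maximum block weight
            NORMAL_DISPATCH_RATIO,
        );
    pub RuntimeBlockLength: frame_system::limits::BlockLength =
        frame_system::limits::BlockLength::max_with_normal_ratio(
            5 * 1024 * 1024,           // 5 MiB block length cap
            NORMAL_DISPATCH_RATIO,
        );
}

// impl frame_system::Config for Runtime {
//     type BlockWeights = RuntimeBlockWeights;
//     type BlockLength = RuntimeBlockLength;
//     // ...remaining associated types as before, minus the removed weight/length parameters
// }
```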
impl OnChargeTransaction for CurrencyAdapter where - T: Trait, - T::TransactionByteFee: Get<::AccountId>>::Balance>, - C: Currency<::AccountId>, + T: Config, + T::TransactionByteFee: Get<::AccountId>>::Balance>, + C: Currency<::AccountId>, C::PositiveImbalance: - Imbalance<::AccountId>>::Balance, Opposite = C::NegativeImbalance>, + Imbalance<::AccountId>>::Balance, Opposite = C::NegativeImbalance>, C::NegativeImbalance: - Imbalance<::AccountId>>::Balance, Opposite = C::PositiveImbalance>, + Imbalance<::AccountId>>::Balance, Opposite = C::PositiveImbalance>, OU: OnUnbalanced>, { type LiquidityInfo = Option>; - type Balance = ::AccountId>>::Balance; + type Balance = ::AccountId>>::Balance; /// Withdraw the predicted fee from the transaction origin. /// diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index fd2d103e9f3350b22c3d55ea40cb37e18b1d5d58..7570d2a499c33068755d74d5c5bf28e5c602cf8e 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -20,6 +20,7 @@ sp-runtime = { version = "2.0.0", default-features = false, path = "../../primit frame-support = { version = "2.0.0", default-features = false, path = "../support" } frame-system = { version = "2.0.0", default-features = false, path = "../system" } pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } +impl-trait-for-tuples = "0.1.3" frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } diff --git a/frame/treasury/README.md b/frame/treasury/README.md index 424b8e0eedf9952717ad1d2c75c3b7e01e5dcc70..4b061359fea759f6a7f754290b4babeb3ee8dcd7 100644 --- a/frame/treasury/README.md +++ b/frame/treasury/README.md @@ -1,118 +1,31 @@ # Treasury Module -The Treasury module provides a "pot" of funds that can be managed by stakeholders in the -system and a structure for making spending proposals from this pot. - -- [`treasury::Trait`](https://docs.rs/pallet-treasury/latest/pallet_treasury/trait.Trait.html) -- [`Call`](https://docs.rs/pallet-treasury/latest/pallet_treasury/enum.Call.html) +The Treasury module provides a "pot" of funds that can be managed by stakeholders in the system and +a structure for making spending proposals from this pot. ## Overview -The Treasury Module itself provides the pot to store funds, and a means for stakeholders to -propose, approve, and deny expenditures. The chain will need to provide a method (e.g. -inflation, fees) for collecting funds. - -By way of example, the Council could vote to fund the Treasury with a portion of the block -reward and use the funds to pay developers. - -### Tipping - -A separate subsystem exists to allow for an agile "tipping" process, whereby a reward may be -given without first having a pre-determined stakeholder group come to consensus on how much -should be paid. - -A group of `Tippers` is determined through the config `Trait`. After half of these have declared -some amount that they believe a particular reported reason deserves, then a countdown period is -entered where any remaining members can declare their tip amounts also. After the close of the -countdown period, the median of all declared tips is paid to the reported beneficiary, along -with any finders fee, in case of a public (and bonded) original report. - -### Bounty - -A Bounty Spending is a reward for a specified body of work - or specified set of objectives - that -needs to be executed for a predefined Treasury amount to be paid out. 
A curator is assigned after -the bounty is approved and funded by Council, to be delegated -with the responsibility of assigning a payout address once the specified set of objectives is completed. - -After the Council has activated a bounty, it delegates the work that requires expertise to a curator -in exchange of a deposit. Once the curator accepts the bounty, they -get to close the Active bounty. Closing the Active bounty enacts a delayed payout to the payout -address, the curator fee and the return of the curator deposit. The -delay allows for intervention through regular democracy. The Council gets to unassign the curator, -resulting in a new curator election. The Council also gets to cancel -the bounty if deemed necessary before assigning a curator or once the bounty is active or payout -is pending, resulting in the slash of the curator's deposit. +The Treasury Module itself provides the pot to store funds, and a means for stakeholders to propose, +approve, and deny expenditures. The chain will need to provide a method (e.g.inflation, fees) for +collecting funds. +By way of example, the Council could vote to fund the Treasury with a portion of the block reward +and use the funds to pay developers. ### Terminology - **Proposal:** A suggestion to allocate funds from the pot to a beneficiary. -- **Beneficiary:** An account who will receive the funds from a proposal iff -the proposal is approved. -- **Deposit:** Funds that a proposer must lock when making a proposal. The -deposit will be returned or slashed if the proposal is approved or rejected -respectively. +- **Beneficiary:** An account who will receive the funds from a proposal if the proposal is + approved. +- **Deposit:** Funds that a proposer must lock when making a proposal. The deposit will be returned + or slashed if the proposal is approved or rejected respectively. - **Pot:** Unspent funds accumulated by the treasury module. -Tipping protocol: -- **Tipping:** The process of gathering declarations of amounts to tip and taking the median - amount to be transferred from the treasury to a beneficiary account. -- **Tip Reason:** The reason for a tip; generally a URL which embodies or explains why a - particular individual (identified by an account ID) is worthy of a recognition by the - treasury. -- **Finder:** The original public reporter of some reason for tipping. -- **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip, - rather than the main beneficiary. - -Bounty: -- **Bounty spending proposal:** A proposal to reward a predefined body of work upon completion by -the Treasury. -- **Proposer:** An account proposing a bounty spending. -- **Curator:** An account managing the bounty and assigning a payout address receiving the reward -for the completion of work. -- **Deposit:** The amount held on deposit for placing a bounty proposal plus the amount held on -deposit per byte within the bounty description. -- **Curator deposit:** The payment from a candidate willing to curate an approved bounty. The deposit -is returned when/if the bounty is completed. -- **Bounty value:** The total amount that should be paid to the Payout Address if the bounty is -rewarded. -- **Payout address:** The account to which the total or part of the bounty is assigned to. -- **Payout Delay:** The delay period for which a bounty beneficiary needs to wait before claiming. -- **Curator fee:** The reserved upfront payment for a curator for work related to the bounty. 
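With tips and bounties carved out, the Interface section below keeps only the propose/approve/reject flow. A minimal sketch of that lifecycle in the style of the pallet's mock tests (the account ids, amounts, `new_test_ext` helper and root `ApproveOrigin` are assumptions about the test setup):

```rust
#[test]
fn spend_lifecycle_sketch() {
    new_test_ext().execute_with(|| {
        // Any signed origin may propose a spend; the proposal bond is reserved from account 0.
        assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3));
        // ApproveOrigin (root in this sketch) accepts proposal index 0.
        assert_ok!(Treasury::approve_proposal(Origin::root(), 0));
        // At the next spend period, on_initialize pays the beneficiary from the pot,
        // or rolls the approval over if the pot cannot cover it.
        <Treasury as OnInitialize<u64>>::on_initialize(2);
    });
}
```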
- ## Interface ### Dispatchable Functions General spending/proposal protocol: - `propose_spend` - Make a spending proposal and stake the required deposit. -- `set_pot` - Set the spendable balance of funds. -- `configure` - Configure the module's proposal requirements. - `reject_proposal` - Reject a proposal, slashing the deposit. - `approve_proposal` - Accept the proposal, returning the deposit. - -Tipping protocol: -- `report_awesome` - Report something worthy of a tip and register for a finders fee. -- `retract_tip` - Retract a previous (finders fee registered) report. -- `tip_new` - Report an item worthy of a tip and declare a specific amount to tip. -- `tip` - Declare or redeclare an amount to tip for a particular reason. -- `close_tip` - Close and pay out a tip. - -Bounty protocol: -- `propose_bounty` - Propose a specific treasury amount to be earmarked for a predefined set of -tasks and stake the required deposit. -- `approve_bounty` - Accept a specific treasury amount to be earmarked for a predefined body of work. -- `propose_curator` - Assign an account to a bounty as candidate curator. -- `accept_curator` - Accept a bounty assignment from the Council, setting a curator deposit. -- `extend_bounty_expiry` - Extend the expiry block number of the bounty and stay active. -- `award_bounty` - Close and pay out the specified amount for the completed work. -- `claim_bounty` - Claim a specific bounty amount from the Payout Address. -- `unassign_curator` - Unassign an accepted curator from a specific earmark. -- `close_bounty` - Cancel the earmark for a specific treasury amount and close the bounty. - - -## GenesisConfig - -The Treasury module depends on the [`GenesisConfig`](https://docs.rs/pallet-treasury/latest/pallet_treasury/struct.GenesisConfig.html). - -License: Apache-2.0 \ No newline at end of file diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index 2794e6cc432039306ceaf9521121b81f31386731..16ed1b01ae0d0ad6fe530d2ff92d6b79bd7da8b1 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks_instance, account, whitelisted_caller}; +use frame_benchmarking::{benchmarks_instance, account}; use frame_support::traits::OnInitialize; use crate::Module as Treasury; @@ -30,7 +30,7 @@ use crate::Module as Treasury; const SEED: u32 = 0; // Create the pre-requisite information needed to create a treasury `propose_spend`. -fn setup_proposal, I: Instance>(u: u32) -> ( +fn setup_proposal, I: Instance>(u: u32) -> ( T::AccountId, BalanceOf, ::Source, @@ -43,58 +43,8 @@ fn setup_proposal, I: Instance>(u: u32) -> ( (caller, value, beneficiary_lookup) } -// Create the pre-requisite information needed to create a `report_awesome`. -fn setup_awesome, I: Instance>(length: u32) -> (T::AccountId, Vec, T::AccountId) { - let caller = whitelisted_caller(); - let value = T::TipReportDepositBase::get() - + T::DataDepositPerByte::get() * length.into() - + T::Currency::minimum_balance(); - let _ = T::Currency::make_free_balance_be(&caller, value); - let reason = vec![0; length as usize]; - let awesome_person = account("awesome", 0, SEED); - (caller, reason, awesome_person) -} - -// Create the pre-requisite information needed to call `tip_new`. -fn setup_tip, I: Instance>(r: u32, t: u32) -> - Result<(T::AccountId, Vec, T::AccountId, BalanceOf), &'static str> -{ - let tippers_count = T::Tippers::count(); - - for i in 0 .. 
t { - let member = account("member", i, SEED); - T::Tippers::add(&member); - ensure!(T::Tippers::contains(&member), "failed to add tipper"); - } - - ensure!(T::Tippers::count() == tippers_count + t as usize, "problem creating tippers"); - let caller = account("member", t - 1, SEED); - let reason = vec![0; r as usize]; - let beneficiary = account("beneficiary", t, SEED); - let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); - Ok((caller, reason, beneficiary, value)) -} - -// Create `t` new tips for the tip proposal with `hash`. -// This function automatically makes the tip able to close. -fn create_tips, I: Instance>(t: u32, hash: T::Hash, value: BalanceOf) -> - Result<(), &'static str> -{ - for i in 0 .. t { - let caller = account("member", i, SEED); - ensure!(T::Tippers::contains(&caller), "caller is not a tipper"); - Treasury::::tip(RawOrigin::Signed(caller).into(), hash, value)?; - } - Tips::::mutate(hash, |maybe_tip| { - if let Some(open_tip) = maybe_tip { - open_tip.closes = Some(T::BlockNumber::zero()); - } - }); - Ok(()) -} - // Create proposals that are approved for use in `on_initialize`. -fn create_approved_proposals, I: Instance>(n: u32) -> Result<(), &'static str> { +fn create_approved_proposals, I: Instance>(n: u32) -> Result<(), &'static str> { for i in 0 .. n { let (caller, value, lookup) = setup_proposal::(i); Treasury::::propose_spend( @@ -109,61 +59,12 @@ fn create_approved_proposals, I: Instance>(n: u32) -> Result<(), &'s Ok(()) } -// Create bounties that are approved for use in `on_initialize`. -fn create_approved_bounties, I: Instance>(n: u32) -> Result<(), &'static str> { - for i in 0 .. n { - let (caller, _curator, _fee, value, reason) = setup_bounty::(i, MAX_BYTES); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; - } - ensure!(BountyApprovals::::get().len() == n as usize, "Not all bounty approved"); - Ok(()) -} - -// Create the pre-requisite information needed to create a treasury `propose_bounty`. 
-fn setup_bounty, I: Instance>(u: u32, d: u32) -> ( - T::AccountId, - T::AccountId, - BalanceOf, - BalanceOf, - Vec, -) { - let caller = account("caller", u, SEED); - let value: BalanceOf = T::BountyValueMinimum::get().saturating_mul(100u32.into()); - let fee = value / 2u32.into(); - let deposit = T::BountyDepositBase::get() + T::DataDepositPerByte::get() * MAX_BYTES.into(); - let _ = T::Currency::make_free_balance_be(&caller, deposit); - let curator = account("curator", u, SEED); - let _ = T::Currency::make_free_balance_be(&curator, fee / 2u32.into()); - let reason = vec![0; d as usize]; - (caller, curator, fee, value, reason) -} - -fn create_bounty, I: Instance>() -> Result<( - ::Source, - BountyIndex, -), &'static str> { - let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); - let curator_lookup = T::Lookup::unlookup(curator.clone()); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; - Treasury::::on_initialize(T::BlockNumber::zero()); - Treasury::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup.clone(), fee)?; - Treasury::::accept_curator(RawOrigin::Signed(curator).into(), bounty_id)?; - Ok((curator_lookup, bounty_id)) -} - -fn setup_pod_account, I: Instance>() { +fn setup_pod_account, I: Instance>() { let pot_account = Treasury::::account_id(); let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000u32.into()); let _ = T::Currency::make_free_balance_be(&pot_account, value); } -const MAX_BYTES: u32 = 16384; -const MAX_TIPPERS: u32 = 100; - benchmarks_instance! { _ { } @@ -194,178 +95,6 @@ benchmarks_instance! { let proposal_id = Treasury::::proposal_count() - 1; }: _(RawOrigin::Root, proposal_id) - report_awesome { - let r in 0 .. MAX_BYTES; - let (caller, reason, awesome_person) = setup_awesome::(r); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), reason, awesome_person) - - retract_tip { - let r = MAX_BYTES; - let (caller, reason, awesome_person) = setup_awesome::(r); - Treasury::::report_awesome( - RawOrigin::Signed(caller.clone()).into(), - reason.clone(), - awesome_person.clone() - )?; - let reason_hash = T::Hashing::hash(&reason[..]); - let hash = T::Hashing::hash_of(&(&reason_hash, &awesome_person)); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), hash) - - tip_new { - let r in 0 .. MAX_BYTES; - let t in 1 .. MAX_TIPPERS; - - let (caller, reason, beneficiary, value) = setup_tip::(r, t)?; - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), reason, beneficiary, value) - - tip { - let t in 1 .. 
MAX_TIPPERS; - let (member, reason, beneficiary, value) = setup_tip::(0, t)?; - let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); - Treasury::::tip_new( - RawOrigin::Signed(member).into(), - reason.clone(), - beneficiary.clone(), - value - )?; - let reason_hash = T::Hashing::hash(&reason[..]); - let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); - ensure!(Tips::::contains_key(hash), "tip does not exist"); - create_tips::(t - 1, hash.clone(), value)?; - let caller = account("member", t - 1, SEED); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), hash, value) - - close_tip { - let t in 1 .. MAX_TIPPERS; - - // Make sure pot is funded - setup_pod_account::(); - - // Set up a new tip proposal - let (member, reason, beneficiary, value) = setup_tip::(0, t)?; - let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); - Treasury::::tip_new( - RawOrigin::Signed(member).into(), - reason.clone(), - beneficiary.clone(), - value - )?; - - // Create a bunch of tips - let reason_hash = T::Hashing::hash(&reason[..]); - let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); - ensure!(Tips::::contains_key(hash), "tip does not exist"); - create_tips::(t, hash.clone(), value)?; - - let caller = account("caller", t, SEED); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), hash) - - propose_bounty { - let d in 0 .. MAX_BYTES; - - let (caller, curator, fee, value, description) = setup_bounty::(0, d); - }: _(RawOrigin::Signed(caller), value, description) - - approve_bounty { - let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - }: _(RawOrigin::Root, bounty_id) - - propose_curator { - setup_pod_account::(); - let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); - let curator_lookup = T::Lookup::unlookup(curator.clone()); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; - Treasury::::on_initialize(T::BlockNumber::zero()); - }: _(RawOrigin::Root, bounty_id, curator_lookup, fee) - - // Worst case when curator is inactive and any sender unassigns the curator. 
- unassign_curator { - setup_pod_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::::get() - 1; - frame_system::Module::::set_block_number(T::BountyUpdatePeriod::get() + 1u32.into()); - let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), bounty_id) - - accept_curator { - setup_pod_account::(); - let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); - let curator_lookup = T::Lookup::unlookup(curator.clone()); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; - Treasury::::on_initialize(T::BlockNumber::zero()); - Treasury::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup, fee)?; - }: _(RawOrigin::Signed(curator), bounty_id) - - award_bounty { - setup_pod_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); - - let bounty_id = BountyCount::::get() - 1; - let curator = T::Lookup::lookup(curator_lookup)?; - let beneficiary = T::Lookup::unlookup(account("beneficiary", 0, SEED)); - }: _(RawOrigin::Signed(curator), bounty_id, beneficiary) - - claim_bounty { - setup_pod_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); - - let bounty_id = BountyCount::::get() - 1; - let curator = T::Lookup::lookup(curator_lookup)?; - - let beneficiary = T::Lookup::unlookup(account("beneficiary", 0, SEED)); - Treasury::::award_bounty(RawOrigin::Signed(curator.clone()).into(), bounty_id, beneficiary)?; - - frame_system::Module::::set_block_number(T::BountyDepositPayoutDelay::get()); - - }: _(RawOrigin::Signed(curator), bounty_id) - - close_bounty_proposed { - setup_pod_account::(); - let (caller, curator, fee, value, reason) = setup_bounty::(0, 0); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - }: close_bounty(RawOrigin::Root, bounty_id) - - close_bounty_active { - setup_pod_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::::get() - 1; - }: close_bounty(RawOrigin::Root, bounty_id) - - extend_bounty_expiry { - setup_pod_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); - - let bounty_id = BountyCount::::get() - 1; - let curator = T::Lookup::lookup(curator_lookup)?; - }: _(RawOrigin::Signed(curator), bounty_id, Vec::new()) - on_initialize_proposals { let p in 0 .. 100; setup_pod_account::(); @@ -373,14 +102,6 @@ benchmarks_instance! { }: { Treasury::::on_initialize(T::BlockNumber::zero()); } - - on_initialize_bounties { - let b in 0 .. 
100; - setup_pod_account::(); - create_approved_bounties::(b)?; - }: { - Treasury::::on_initialize(T::BlockNumber::zero()); - } } #[cfg(test)] @@ -395,23 +116,7 @@ mod tests { assert_ok!(test_benchmark_propose_spend::()); assert_ok!(test_benchmark_reject_proposal::()); assert_ok!(test_benchmark_approve_proposal::()); - assert_ok!(test_benchmark_report_awesome::()); - assert_ok!(test_benchmark_retract_tip::()); - assert_ok!(test_benchmark_tip_new::()); - assert_ok!(test_benchmark_tip::()); - assert_ok!(test_benchmark_close_tip::()); - assert_ok!(test_benchmark_propose_bounty::()); - assert_ok!(test_benchmark_approve_bounty::()); - assert_ok!(test_benchmark_propose_curator::()); - assert_ok!(test_benchmark_unassign_curator::()); - assert_ok!(test_benchmark_accept_curator::()); - assert_ok!(test_benchmark_award_bounty::()); - assert_ok!(test_benchmark_claim_bounty::()); - assert_ok!(test_benchmark_close_bounty_proposed::()); - assert_ok!(test_benchmark_close_bounty_active::()); - assert_ok!(test_benchmark_extend_bounty_expiry::()); assert_ok!(test_benchmark_on_initialize_proposals::()); - assert_ok!(test_benchmark_on_initialize_bounties::()); }); } } diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index 2ada0660f9ec201b8983828f8b469559fc1c850e..835cf11d721ae11e4db6e94c4aee108f1bcab28a 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -17,10 +17,10 @@ //! # Treasury Module //! -//! The Treasury module provides a "pot" of funds that can be managed by stakeholders in the -//! system and a structure for making spending proposals from this pot. +//! The Treasury module provides a "pot" of funds that can be managed by stakeholders in the system +//! and a structure for making spending proposals from this pot. //! -//! - [`treasury::Trait`](./trait.Trait.html) +//! - [`treasury::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -32,71 +32,16 @@ //! By way of example, the Council could vote to fund the Treasury with a portion of the block //! reward and use the funds to pay developers. //! -//! ### Tipping -//! -//! A separate subsystem exists to allow for an agile "tipping" process, whereby a reward may be -//! given without first having a pre-determined stakeholder group come to consensus on how much -//! should be paid. -//! -//! A group of `Tippers` is determined through the config `Trait`. After half of these have declared -//! some amount that they believe a particular reported reason deserves, then a countdown period is -//! entered where any remaining members can declare their tip amounts also. After the close of the -//! countdown period, the median of all declared tips is paid to the reported beneficiary, along -//! with any finders fee, in case of a public (and bonded) original report. -//! -//! ### Bounty -//! -//! A Bounty Spending is a reward for a specified body of work - or specified set of objectives - that -//! needs to be executed for a predefined Treasury amount to be paid out. A curator is assigned after -//! the bounty is approved and funded by Council, to be delegated -//! with the responsibility of assigning a payout address once the specified set of objectives is completed. -//! -//! After the Council has activated a bounty, it delegates the work that requires expertise to a curator -//! in exchange of a deposit. Once the curator accepts the bounty, they -//! get to close the Active bounty. Closing the Active bounty enacts a delayed payout to the payout -//! 
address, the curator fee and the return of the curator deposit. The -//! delay allows for intervention through regular democracy. The Council gets to unassign the curator, -//! resulting in a new curator election. The Council also gets to cancel -//! the bounty if deemed necessary before assigning a curator or once the bounty is active or payout -//! is pending, resulting in the slash of the curator's deposit. -//! //! //! ### Terminology //! //! - **Proposal:** A suggestion to allocate funds from the pot to a beneficiary. -//! - **Beneficiary:** An account who will receive the funds from a proposal iff -//! the proposal is approved. -//! - **Deposit:** Funds that a proposer must lock when making a proposal. The -//! deposit will be returned or slashed if the proposal is approved or rejected -//! respectively. +//! - **Beneficiary:** An account who will receive the funds from a proposal iff the proposal is +//! approved. +//! - **Deposit:** Funds that a proposer must lock when making a proposal. The deposit will be +//! returned or slashed if the proposal is approved or rejected respectively. //! - **Pot:** Unspent funds accumulated by the treasury module. //! -//! Tipping protocol: -//! - **Tipping:** The process of gathering declarations of amounts to tip and taking the median -//! amount to be transferred from the treasury to a beneficiary account. -//! - **Tip Reason:** The reason for a tip; generally a URL which embodies or explains why a -//! particular individual (identified by an account ID) is worthy of a recognition by the -//! treasury. -//! - **Finder:** The original public reporter of some reason for tipping. -//! - **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip, -//! rather than the main beneficiary. -//! -//! Bounty: -//! - **Bounty spending proposal:** A proposal to reward a predefined body of work upon completion by -//! the Treasury. -//! - **Proposer:** An account proposing a bounty spending. -//! - **Curator:** An account managing the bounty and assigning a payout address receiving the reward -//! for the completion of work. -//! - **Deposit:** The amount held on deposit for placing a bounty proposal plus the amount held on -//! deposit per byte within the bounty description. -//! - **Curator deposit:** The payment from a candidate willing to curate an approved bounty. The deposit -//! is returned when/if the bounty is completed. -//! - **Bounty value:** The total amount that should be paid to the Payout Address if the bounty is -//! rewarded. -//! - **Payout address:** The account to which the total or part of the bounty is assigned to. -//! - **Payout Delay:** The delay period for which a bounty beneficiary needs to wait before claiming. -//! - **Curator fee:** The reserved upfront payment for a curator for work related to the bounty. -//! //! ## Interface //! //! ### Dispatchable Functions @@ -106,62 +51,43 @@ //! - `reject_proposal` - Reject a proposal, slashing the deposit. //! - `approve_proposal` - Accept the proposal, returning the deposit. //! -//! Tipping protocol: -//! - `report_awesome` - Report something worthy of a tip and register for a finders fee. -//! - `retract_tip` - Retract a previous (finders fee registered) report. -//! - `tip_new` - Report an item worthy of a tip and declare a specific amount to tip. -//! - `tip` - Declare or redeclare an amount to tip for a particular reason. -//! - `close_tip` - Close and pay out a tip. -//! -//! Bounty protocol: -//! 
- `propose_bounty` - Propose a specific treasury amount to be earmarked for a predefined set of -//! tasks and stake the required deposit. -//! - `approve_bounty` - Accept a specific treasury amount to be earmarked for a predefined body of work. -//! - `propose_curator` - Assign an account to a bounty as candidate curator. -//! - `accept_curator` - Accept a bounty assignment from the Council, setting a curator deposit. -//! - `extend_bounty_expiry` - Extend the expiry block number of the bounty and stay active. -//! - `award_bounty` - Close and pay out the specified amount for the completed work. -//! - `claim_bounty` - Claim a specific bounty amount from the Payout Address. -//! - `unassign_curator` - Unassign an accepted curator from a specific earmark. -//! - `close_bounty` - Cancel the earmark for a specific treasury amount and close the bounty. -//! -//! //! ## GenesisConfig //! //! The Treasury module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(test)] mod tests; mod benchmarking; + pub mod weights; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; use sp_std::prelude::*; -use frame_support::{decl_module, decl_storage, decl_event, ensure, print, decl_error, Parameter}; +use frame_support::{decl_module, decl_storage, decl_event, ensure, print, decl_error}; use frame_support::traits::{ - Currency, Get, Imbalance, OnUnbalanced, ExistenceRequirement::{KeepAlive, AllowDeath}, + Currency, Get, Imbalance, OnUnbalanced, ExistenceRequirement::{KeepAlive}, ReservableCurrency, WithdrawReasons }; -use sp_runtime::{Permill, ModuleId, Percent, RuntimeDebug, DispatchResult, traits::{ - Zero, StaticLookup, AccountIdConversion, Saturating, Hash, BadOrigin +use sp_runtime::{Permill, ModuleId, RuntimeDebug, traits::{ + Zero, StaticLookup, AccountIdConversion, Saturating }}; -use frame_support::dispatch::DispatchResultWithPostInfo; use frame_support::weights::{Weight, DispatchClass}; -use frame_support::traits::{Contains, ContainsLengthBound, EnsureOrigin}; +use frame_support::traits::{EnsureOrigin}; use codec::{Encode, Decode}; -use frame_system::{self as system, ensure_signed}; +use frame_system::{ensure_signed}; pub use weights::WeightInfo; -type BalanceOf = - <>::Currency as Currency<::AccountId>>::Balance; -type PositiveImbalanceOf = - <>::Currency as Currency<::AccountId>>::PositiveImbalance; -type NegativeImbalanceOf = - <>::Currency as Currency<::AccountId>>::NegativeImbalance; +pub type BalanceOf = + <>::Currency as Currency<::AccountId>>::Balance; +pub type PositiveImbalanceOf = + <>::Currency as Currency<::AccountId>>::PositiveImbalance; +pub type NegativeImbalanceOf = + <>::Currency as Currency<::AccountId>>::NegativeImbalance; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The treasury's module id, used for deriving its sovereign account ID. type ModuleId: Get; @@ -174,25 +100,8 @@ pub trait Trait: frame_system::Trait { /// Origin from which rejections must come. type RejectOrigin: EnsureOrigin; - /// Origin from which tippers must come. - /// - /// `ContainsLengthBound::max_len` must be cost free (i.e. no storage read or heavy operation). - type Tippers: Contains + ContainsLengthBound; - - /// The period for which a tip remains open after is has achieved threshold tippers. - type TipCountdown: Get; - - /// The percent of the final tip which goes to the original reporter of the tip. - type TipFindersFee: Get; - - /// The amount held on deposit for placing a tip report. 
- type TipReportDepositBase: Get>; - - /// The amount held on deposit per byte within the tip report reason or bounty description. - type DataDepositPerByte: Get>; - /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// Handler for the unbalanced decrease when slashing for a rejected proposal or bounty. type OnSlash: OnUnbalanced>; @@ -210,29 +119,36 @@ pub trait Trait: frame_system::Trait { /// Percentage of spare funds (if any) that are burnt per spend period. type Burn: Get; - /// The amount held on deposit for placing a bounty proposal. - type BountyDepositBase: Get>; - - /// The delay period for which a bounty beneficiary need to wait before claim the payout. - type BountyDepositPayoutDelay: Get; - - /// Bounty duration in blocks. - type BountyUpdatePeriod: Get; - - /// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator. - type BountyCuratorDeposit: Get; - - /// Minimum value for a bounty. - type BountyValueMinimum: Get>; - - /// Maximum acceptable reason length. - type MaximumReasonLength: Get; - /// Handler for the unbalanced decrease when treasury funds are burned. type BurnDestination: OnUnbalanced>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + + /// Runtime hooks to external pallet using treasury to compute spend funds. + type SpendFunds: SpendFunds; +} + +/// A trait to allow the Treasury Pallet to spend it's funds for other purposes. +/// There is an expectation that the implementer of this trait will correctly manage +/// the mutable variables passed to it: +/// * `budget_remaining`: How much available funds that can be spent by the treasury. +/// As funds are spent, you must correctly deduct from this value. +/// * `imbalance`: Any imbalances that you create should be subsumed in here to +/// maximize efficiency of updating the total issuance. (i.e. `deposit_creating`) +/// * `total_weight`: Track any weight that your `spend_fund` implementation uses by +/// updating this value. +/// * `missed_any`: If there were items that you want to spend on, but there were +/// not enough funds, mark this value as `true`. This will prevent the treasury +/// from burning the excess funds. +#[impl_trait_for_tuples::impl_for_tuples(30)] +pub trait SpendFunds, I=DefaultInstance> { + fn spend_funds( + budget_remaining: &mut BalanceOf, + imbalance: &mut PositiveImbalanceOf, + total_weight: &mut Weight, + missed_any: &mut bool, + ); } /// An index of a proposal. Just a `u32`. @@ -252,122 +168,18 @@ pub struct Proposal { bond: Balance, } -/// An open tipping "motion". Retains all details of a tip including information on the finder -/// and the members who have voted. -#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] -pub struct OpenTip< - AccountId: Parameter, - Balance: Parameter, - BlockNumber: Parameter, - Hash: Parameter, -> { - /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be - /// sensible. - reason: Hash, - /// The account to be tipped. - who: AccountId, - /// The account who began this tip. - finder: AccountId, - /// The amount held on deposit for this tip. - deposit: Balance, - /// The block number at which this tip will close if `Some`. If `None`, then no closing is - /// scheduled. - closes: Option, - /// The members who have voted for this tip. Sorted by AccountId. - tips: Vec<(AccountId, Balance)>, - /// Whether this tip should result in the finder taking a fee. 
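The new `SpendFunds` hook above hands an implementer the remaining budget, the running positive imbalance, the accumulated weight, and the `missed_any` flag, and its doc comment spells out the bookkeeping each one owes. A self-contained sketch of that contract with plain integers (the payout, weight cost and `pay` closure are hypothetical stand-ins for `BalanceOf`, `PositiveImbalanceOf` and the real currency calls):

```rust
// Illustrative only: mirrors the documented SpendFunds::spend_funds bookkeeping.
fn spend_funds_sketch(
    budget_remaining: &mut u64,
    total_weight: &mut u64,
    missed_any: &mut bool,
    mut pay: impl FnMut(u64), // stands in for depositing funds and subsuming the imbalance
) {
    const PAYOUT: u64 = 50;          // what this hook wants to spend this period
    const WEIGHT_COST: u64 = 10_000; // what the implementer charges for its work

    if *budget_remaining >= PAYOUT {
        // Spend: deduct from the budget and record the resulting imbalance via `pay`.
        *budget_remaining -= PAYOUT;
        pay(PAYOUT);
    } else {
        // Not enough left: flag it so the treasury does not burn the shortfall.
        *missed_any = true;
    }
    // Always account for the weight this hook consumed.
    *total_weight += WEIGHT_COST;
}
```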
- finders_fee: bool, -} - -/// An index of a bounty. Just a `u32`. -pub type BountyIndex = u32; - -/// A bounty proposal. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct Bounty { - /// The account proposing it. - proposer: AccountId, - /// The (total) amount that should be paid if the bounty is rewarded. - value: Balance, - /// The curator fee. Included in value. - fee: Balance, - /// The deposit of curator. - curator_deposit: Balance, - /// The amount held on deposit (reserved) for making this proposal. - bond: Balance, - /// The status of this bounty. - status: BountyStatus, -} - -/// The status of a bounty proposal. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub enum BountyStatus { - /// The bounty is proposed and waiting for approval. - Proposed, - /// The bounty is approved and waiting to become active at next spend period. - Approved, - /// The bounty is funded and waiting for curator assignment. - Funded, - /// A curator has been proposed by the `ApproveOrigin`. Waiting for acceptance from the curator. - CuratorProposed { - /// The assigned curator of this bounty. - curator: AccountId, - }, - /// The bounty is active and waiting to be awarded. - Active { - /// The curator of this bounty. - curator: AccountId, - /// An update from the curator is due by this block, else they are considered inactive. - update_due: BlockNumber, - }, - /// The bounty is awarded and waiting to released after a delay. - PendingPayout { - /// The curator of this bounty. - curator: AccountId, - /// The beneficiary of the bounty. - beneficiary: AccountId, - /// When the bounty can be claimed. - unlock_at: BlockNumber, - }, -} - decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Treasury { + trait Store for Module, I: Instance=DefaultInstance> as Treasury { /// Number of proposals that have been made. ProposalCount get(fn proposal_count): ProposalIndex; /// Proposals that have been made. - Proposals get(fn proposals): + pub Proposals get(fn proposals): map hasher(twox_64_concat) ProposalIndex => Option>>; /// Proposal indices that have been approved but not yet awarded. - Approvals get(fn approvals): Vec; - - /// Tips that are not yet completed. Keyed by the hash of `(reason, who)` from the value. - /// This has the insecure enumerable hash function since the key itself is already - /// guaranteed to be a secure hash. - pub Tips get(fn tips): - map hasher(twox_64_concat) T::Hash - => Option, T::BlockNumber, T::Hash>>; - - /// Simple preimage lookup from the reason's hash to the original data. Again, has an - /// insecure enumerable hash since the key is guaranteed to be the result of a secure hash. - pub Reasons get(fn reasons): map hasher(identity) T::Hash => Option>; - - /// Number of bounty proposals that have been made. - pub BountyCount get(fn bounty_count): BountyIndex; - - /// Bounties that have been made. - pub Bounties get(fn bounties): - map hasher(twox_64_concat) BountyIndex - => Option, T::BlockNumber>>; - - /// The description of each bounty. - pub BountyDescriptions get(fn bounty_descriptions): map hasher(twox_64_concat) BountyIndex => Option>; - - /// Bounty indices that have been approved but not yet funded. - pub BountyApprovals get(fn bounty_approvals): Vec; + pub Approvals get(fn approvals): Vec; } add_extra_genesis { build(|_config| { @@ -388,8 +200,7 @@ decl_event!( pub enum Event where Balance = BalanceOf, - ::AccountId, - ::Hash, + ::AccountId, { /// New proposal. 
\[proposal_index\] Proposed(ProposalIndex), @@ -406,66 +217,21 @@ decl_event!( Rollover(Balance), /// Some funds have been deposited. \[deposit\] Deposit(Balance), - /// A new tip suggestion has been opened. \[tip_hash\] - NewTip(Hash), - /// A tip suggestion has reached threshold and is closing. \[tip_hash\] - TipClosing(Hash), - /// A tip suggestion has been closed. \[tip_hash, who, payout\] - TipClosed(Hash, AccountId, Balance), - /// A tip suggestion has been retracted. \[tip_hash\] - TipRetracted(Hash), - /// New bounty proposal. [index] - BountyProposed(BountyIndex), - /// A bounty proposal was rejected; funds were slashed. [index, bond] - BountyRejected(BountyIndex, Balance), - /// A bounty proposal is funded and became active. [index] - BountyBecameActive(BountyIndex), - /// A bounty is awarded to a beneficiary. [index, beneficiary] - BountyAwarded(BountyIndex, AccountId), - /// A bounty is claimed by beneficiary. [index, payout, beneficiary] - BountyClaimed(BountyIndex, Balance, AccountId), - /// A bounty is cancelled. [index] - BountyCanceled(BountyIndex), - /// A bounty expiry is extended. [index] - BountyExtended(BountyIndex), } ); decl_error! { /// Error for the treasury module. - pub enum Error for Module, I: Instance> { + pub enum Error for Module, I: Instance> { /// Proposer's balance is too low. InsufficientProposersBalance, /// No proposal or bounty at that index. InvalidIndex, - /// The reason given is just too big. - ReasonTooBig, - /// The tip was already found/started. - AlreadyKnown, - /// The tip hash is unknown. - UnknownTip, - /// The account attempting to retract the tip is not the finder of the tip. - NotFinder, - /// The tip cannot be claimed/closed because there are not enough tippers yet. - StillOpen, - /// The tip cannot be claimed/closed because it's still in the countdown period. - Premature, - /// The bounty status is unexpected. - UnexpectedStatus, - /// Require bounty curator. - RequireCurator, - /// Invalid bounty value. - InvalidValue, - /// Invalid bounty fee. - InvalidFee, - /// A bounty payout is pending. - /// To cancel the bounty, you must unassign and slash the curator. - PendingPayout, } } decl_module! { - pub struct Module, I: Instance=DefaultInstance> + pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: T::Origin { @@ -482,35 +248,9 @@ decl_module! { /// Percentage of spare funds (if any) that are burnt per spend period. const Burn: Permill = T::Burn::get(); - /// The period for which a tip remains open after is has achieved threshold tippers. - const TipCountdown: T::BlockNumber = T::TipCountdown::get(); - - /// The amount of the final tip which goes to the original reporter of the tip. - const TipFindersFee: Percent = T::TipFindersFee::get(); - - /// The amount held on deposit for placing a tip report. - const TipReportDepositBase: BalanceOf = T::TipReportDepositBase::get(); - - /// The amount held on deposit per byte within the tip report reason or bounty description. - const DataDepositPerByte: BalanceOf = T::DataDepositPerByte::get(); - /// The treasury's module id, used for deriving its sovereign account ID. const ModuleId: ModuleId = T::ModuleId::get(); - /// The amount held on deposit for placing a bounty proposal. - const BountyDepositBase: BalanceOf = T::BountyDepositBase::get(); - - /// The delay period for which a bounty beneficiary need to wait before claim the payout. 
- const BountyDepositPayoutDelay: T::BlockNumber = T::BountyDepositPayoutDelay::get(); - - /// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator. - const BountyCuratorDeposit: Permill = T::BountyCuratorDeposit::get(); - - const BountyValueMinimum: BalanceOf = T::BountyValueMinimum::get(); - - /// Maximum acceptable reason length. - const MaximumReasonLength: u32 = T::MaximumReasonLength::get(); - type Error = Error; fn deposit_event() = default; @@ -525,7 +265,7 @@ decl_module! { /// - DbWrites: `ProposalCount`, `Proposals`, `origin account` /// # #[weight = T::WeightInfo::propose_spend()] - fn propose_spend( + pub fn propose_spend( origin, #[compact] value: BalanceOf, beneficiary: ::Source @@ -554,7 +294,7 @@ decl_module! { /// - DbWrites: `Proposals`, `rejected proposer account` /// # #[weight = (T::WeightInfo::reject_proposal(), DispatchClass::Operational)] - fn reject_proposal(origin, #[compact] proposal_id: ProposalIndex) { + pub fn reject_proposal(origin, #[compact] proposal_id: ProposalIndex) { T::RejectOrigin::ensure_origin(origin)?; let proposal = >::take(&proposal_id).ok_or(Error::::InvalidIndex)?; @@ -576,571 +316,13 @@ decl_module! { /// - DbWrite: `Approvals` /// # #[weight = (T::WeightInfo::approve_proposal(), DispatchClass::Operational)] - fn approve_proposal(origin, #[compact] proposal_id: ProposalIndex) { + pub fn approve_proposal(origin, #[compact] proposal_id: ProposalIndex) { T::ApproveOrigin::ensure_origin(origin)?; ensure!(>::contains_key(proposal_id), Error::::InvalidIndex); Approvals::::append(proposal_id); } - /// Report something `reason` that deserves a tip and claim any eventual the finder's fee. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as - /// `DataDepositPerByte` for each byte in `reason`. - /// - /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be - /// a UTF-8-encoded URL. - /// - `who`: The account which should be credited for the tip. - /// - /// Emits `NewTip` if successful. - /// - /// # - /// - Complexity: `O(R)` where `R` length of `reason`. - /// - encoding and hashing of 'reason' - /// - DbReads: `Reasons`, `Tips` - /// - DbWrites: `Reasons`, `Tips` - /// # - #[weight = T::WeightInfo::report_awesome(reason.len() as u32)] - fn report_awesome(origin, reason: Vec, who: T::AccountId) { - let finder = ensure_signed(origin)?; - - ensure!(reason.len() <= T::MaximumReasonLength::get() as usize, Error::::ReasonTooBig); - - let reason_hash = T::Hashing::hash(&reason[..]); - ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); - let hash = T::Hashing::hash_of(&(&reason_hash, &who)); - ensure!(!Tips::::contains_key(&hash), Error::::AlreadyKnown); - - let deposit = T::TipReportDepositBase::get() - + T::DataDepositPerByte::get() * (reason.len() as u32).into(); - T::Currency::reserve(&finder, deposit)?; - - Reasons::::insert(&reason_hash, &reason); - let tip = OpenTip { - reason: reason_hash, - who, - finder, - deposit, - closes: None, - tips: vec![], - finders_fee: true - }; - Tips::::insert(&hash, tip); - Self::deposit_event(RawEvent::NewTip(hash)); - } - - /// Retract a prior tip-report from `report_awesome`, and cancel the process of tipping. - /// - /// If successful, the original deposit will be unreserved. 
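The constants surviving in `decl_module!` above (`ProposalBond`, `ProposalBondMinimum`, `SpendPeriod`, `Burn`, `ModuleId`) are exactly the knobs a runtime supplies through the trimmed `Config` trait, plus the new `SpendFunds` hook. A hedged wiring sketch for a mock runtime (origins, values and the unit-type defaults are illustrative; `()` for `SpendFunds` relies on the tuple impl generated by `impl_trait_for_tuples`):

```rust
parameter_types! {
    pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry");
    pub const ProposalBond: Permill = Permill::from_percent(5);
    pub const ProposalBondMinimum: u64 = 1;
    pub const SpendPeriod: u64 = 2;
    pub const Burn: Permill = Permill::from_percent(50);
}

impl pallet_treasury::Config for Runtime {
    type ModuleId = TreasuryModuleId;
    type Currency = Balances;
    type ApproveOrigin = frame_system::EnsureRoot<u64>;
    type RejectOrigin = frame_system::EnsureRoot<u64>;
    type Event = Event;
    type OnSlash = ();
    type ProposalBond = ProposalBond;
    type ProposalBondMinimum = ProposalBondMinimum;
    type SpendPeriod = SpendPeriod;
    type Burn = Burn;
    type BurnDestination = ();          // burnt funds are simply dropped
    type WeightInfo = ();               // default weights
    type SpendFunds = ();               // no external spenders wired in
}
```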
- /// - /// The dispatch origin for this call must be _Signed_ and the tip identified by `hash` - /// must have been reported by the signing account through `report_awesome` (and not - /// through `tip_new`). - /// - /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed - /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. - /// - /// Emits `TipRetracted` if successful. - /// - /// # - /// - Complexity: `O(1)` - /// - Depends on the length of `T::Hash` which is fixed. - /// - DbReads: `Tips`, `origin account` - /// - DbWrites: `Reasons`, `Tips`, `origin account` - /// # - #[weight = T::WeightInfo::retract_tip()] - fn retract_tip(origin, hash: T::Hash) { - let who = ensure_signed(origin)?; - let tip = Tips::::get(&hash).ok_or(Error::::UnknownTip)?; - ensure!(tip.finder == who, Error::::NotFinder); - - Reasons::::remove(&tip.reason); - Tips::::remove(&hash); - if !tip.deposit.is_zero() { - let _ = T::Currency::unreserve(&who, tip.deposit); - } - Self::deposit_event(RawEvent::TipRetracted(hash)); - } - - /// Give a tip for something new; no finder's fee will be taken. - /// - /// The dispatch origin for this call must be _Signed_ and the signing account must be a - /// member of the `Tippers` set. - /// - /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be - /// a UTF-8-encoded URL. - /// - `who`: The account which should be credited for the tip. - /// - `tip_value`: The amount of tip that the sender would like to give. The median tip - /// value of active tippers will be given to the `who`. - /// - /// Emits `NewTip` if successful. - /// - /// # - /// - Complexity: `O(R + T)` where `R` length of `reason`, `T` is the number of tippers. - /// - `O(T)`: decoding `Tipper` vec of length `T` - /// `T` is charged as upper bound given by `ContainsLengthBound`. - /// The actual cost depends on the implementation of `T::Tippers`. - /// - `O(R)`: hashing and encoding of reason of length `R` - /// - DbReads: `Tippers`, `Reasons` - /// - DbWrites: `Reasons`, `Tips` - /// # - #[weight = T::WeightInfo::tip_new(reason.len() as u32, T::Tippers::max_len() as u32)] - fn tip_new(origin, reason: Vec, who: T::AccountId, #[compact] tip_value: BalanceOf) { - let tipper = ensure_signed(origin)?; - ensure!(T::Tippers::contains(&tipper), BadOrigin); - let reason_hash = T::Hashing::hash(&reason[..]); - ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); - let hash = T::Hashing::hash_of(&(&reason_hash, &who)); - - Reasons::::insert(&reason_hash, &reason); - Self::deposit_event(RawEvent::NewTip(hash.clone())); - let tips = vec![(tipper.clone(), tip_value)]; - let tip = OpenTip { - reason: reason_hash, - who, - finder: tipper, - deposit: Zero::zero(), - closes: None, - tips, - finders_fee: false, - }; - Tips::::insert(&hash, tip); - } - - /// Declare a tip value for an already-open tip. - /// - /// The dispatch origin for this call must be _Signed_ and the signing account must be a - /// member of the `Tippers` set. - /// - /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed - /// as the hash of the tuple of the hash of the original tip `reason` and the beneficiary - /// account ID. - /// - `tip_value`: The amount of tip that the sender would like to give. The median tip - /// value of active tippers will be given to the `who`. - /// - /// Emits `TipClosing` if the threshold of tippers has been reached and the countdown period - /// has started. 
- /// - /// # - /// - Complexity: `O(T)` where `T` is the number of tippers. - /// decoding `Tipper` vec of length `T`, insert tip and check closing, - /// `T` is charged as upper bound given by `ContainsLengthBound`. - /// The actual cost depends on the implementation of `T::Tippers`. - /// - /// Actually weight could be lower as it depends on how many tips are in `OpenTip` but it - /// is weighted as if almost full i.e of length `T-1`. - /// - DbReads: `Tippers`, `Tips` - /// - DbWrites: `Tips` - /// # - #[weight = T::WeightInfo::tip(T::Tippers::max_len() as u32)] - fn tip(origin, hash: T::Hash, #[compact] tip_value: BalanceOf) { - let tipper = ensure_signed(origin)?; - ensure!(T::Tippers::contains(&tipper), BadOrigin); - - let mut tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; - if Self::insert_tip_and_check_closing(&mut tip, tipper, tip_value) { - Self::deposit_event(RawEvent::TipClosing(hash.clone())); - } - Tips::::insert(&hash, tip); - } - - /// Close and payout a tip. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// The tip identified by `hash` must have finished its countdown period. - /// - /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed - /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. - /// - /// # - /// - Complexity: `O(T)` where `T` is the number of tippers. - /// decoding `Tipper` vec of length `T`. - /// `T` is charged as upper bound given by `ContainsLengthBound`. - /// The actual cost depends on the implementation of `T::Tippers`. - /// - DbReads: `Tips`, `Tippers`, `tip finder` - /// - DbWrites: `Reasons`, `Tips`, `Tippers`, `tip finder` - /// # - #[weight = T::WeightInfo::close_tip(T::Tippers::max_len() as u32)] - fn close_tip(origin, hash: T::Hash) { - ensure_signed(origin)?; - - let tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; - let n = tip.closes.as_ref().ok_or(Error::::StillOpen)?; - ensure!(system::Module::::block_number() >= *n, Error::::Premature); - // closed. - Reasons::::remove(&tip.reason); - Tips::::remove(hash); - Self::payout_tip(hash, tip); - } - - /// Propose a new bounty. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as - /// `DataDepositPerByte` for each byte in `reason`. It will be unreserved upon approval, - /// or slashed when rejected. - /// - /// - `curator`: The curator account whom will manage this bounty. - /// - `fee`: The curator fee. - /// - `value`: The total payment amount of this bounty, curator fee included. - /// - `description`: The description of this bounty. - #[weight = T::WeightInfo::propose_bounty(description.len() as u32)] - fn propose_bounty( - origin, - #[compact] value: BalanceOf, - description: Vec, - ) { - let proposer = ensure_signed(origin)?; - Self::create_bounty(proposer, description, value)?; - } - - /// Approve a bounty proposal. At a later time, the bounty will be funded and become active - /// and the original deposit will be returned. - /// - /// May only be called from `T::ApproveOrigin`. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB change. 
- /// # - #[weight = T::WeightInfo::approve_bounty()] - fn approve_bounty(origin, #[compact] bounty_id: ProposalIndex) { - T::ApproveOrigin::ensure_origin(origin)?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - ensure!(bounty.status == BountyStatus::Proposed, Error::::UnexpectedStatus); - - bounty.status = BountyStatus::Approved; - - BountyApprovals::::append(bounty_id); - - Ok(()) - })?; - } - - /// Assign a curator to a funded bounty. - /// - /// May only be called from `T::ApproveOrigin`. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB change. - /// # - #[weight = T::WeightInfo::propose_curator()] - fn propose_curator( - origin, - #[compact] bounty_id: ProposalIndex, - curator: ::Source, - #[compact] fee: BalanceOf, - ) { - T::ApproveOrigin::ensure_origin(origin)?; - - let curator = T::Lookup::lookup(curator)?; - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - match bounty.status { - BountyStatus::Funded | BountyStatus::CuratorProposed { .. } => {}, - _ => return Err(Error::::UnexpectedStatus.into()), - }; - - ensure!(fee < bounty.value, Error::::InvalidFee); - - bounty.status = BountyStatus::CuratorProposed { curator }; - bounty.fee = fee; - - Ok(()) - })?; - } - - /// Unassign curator from a bounty. - /// - /// This function can only be called by the `RejectOrigin` a signed origin. - /// - /// If this function is called by the `RejectOrigin`, we assume that the curator is malicious - /// or inactive. As a result, we will slash the curator when possible. - /// - /// If the origin is the curator, we take this as a sign they are unable to do their job and - /// they willingly give up. We could slash them, but for now we allow them to recover their - /// deposit and exit without issue. (We may want to change this if it is abused.) - /// - /// Finally, the origin can be anyone if and only if the curator is "inactive". This allows - /// anyone in the community to call out that a curator is not doing their due diligence, and - /// we should pick a new curator. In this case the curator should also be slashed. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB change. - /// # - #[weight = T::WeightInfo::unassign_curator()] - fn unassign_curator( - origin, - #[compact] bounty_id: ProposalIndex, - ) { - let maybe_sender = ensure_signed(origin.clone()) - .map(Some) - .or_else(|_| T::RejectOrigin::ensure_origin(origin).map(|_| None))?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - - let slash_curator = |curator: &T::AccountId, curator_deposit: &mut BalanceOf| { - let imbalance = T::Currency::slash_reserved(curator, *curator_deposit).0; - T::OnSlash::on_unbalanced(imbalance); - *curator_deposit = Zero::zero(); - }; - - match bounty.status { - BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => { - // No curator to unassign at this point. - return Err(Error::::UnexpectedStatus.into()) - } - BountyStatus::CuratorProposed { ref curator } => { - // A curator has been proposed, but not accepted yet. - // Either `RejectOrigin` or the proposed curator can unassign the curator. 
-						ensure!(maybe_sender.map_or(true, |sender| sender == *curator), BadOrigin);
-					},
-					BountyStatus::Active { ref curator, ref update_due } => {
-						// The bounty is active.
-						match maybe_sender {
-							// If the `RejectOrigin` is calling this function, slash the curator.
-							None => {
-								slash_curator(curator, &mut bounty.curator_deposit);
-								// Continue to change bounty status below...
-							},
-							Some(sender) => {
-								// If the sender is not the curator, and the curator is inactive,
-								// slash the curator.
-								if sender != *curator {
-									let block_number = system::Module::<T>::block_number();
-									if *update_due < block_number {
-										slash_curator(curator, &mut bounty.curator_deposit);
-										// Continue to change bounty status below...
-									} else {
-										// Curator has more time to give an update.
-										return Err(Error::<T, I>::Premature.into())
-									}
-								} else {
-									// Else this is the curator, willingly giving up their role.
-									// Give back their deposit.
-									let _ = T::Currency::unreserve(&curator, bounty.curator_deposit);
-									// Continue to change bounty status below...
-								}
-							},
-						}
-					},
-					BountyStatus::PendingPayout { ref curator, .. } => {
-						// The bounty is pending payout, so only council can unassign a curator.
-						// By doing so, they are claiming the curator is acting maliciously, so
-						// we slash the curator.
-						ensure!(maybe_sender.is_none(), BadOrigin);
-						slash_curator(curator, &mut bounty.curator_deposit);
-						// Continue to change bounty status below...
-					}
-				};
-
-				bounty.status = BountyStatus::Funded;
-				Ok(())
-			})?;
-		}
-
-		/// Accept the curator role for a bounty.
-		/// A deposit will be reserved from curator and refund upon successful payout.
-		///
-		/// May only be called from the curator.
-		///
-		/// # <weight>
-		/// - O(1).
-		/// - Limited storage reads.
-		/// - One DB change.
-		/// # </weight>
-		#[weight = T::WeightInfo::accept_curator()]
-		fn accept_curator(origin, #[compact] bounty_id: ProposalIndex) {
-			let signer = ensure_signed(origin)?;
-
-			Bounties::<T, I>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
-				let mut bounty = maybe_bounty.as_mut().ok_or(Error::<T, I>::InvalidIndex)?;
-
-				match bounty.status {
-					BountyStatus::CuratorProposed { ref curator } => {
-						ensure!(signer == *curator, Error::<T, I>::RequireCurator);
-
-						let deposit = T::BountyCuratorDeposit::get() * bounty.fee;
-						T::Currency::reserve(curator, deposit)?;
-						bounty.curator_deposit = deposit;
-
-						let update_due = system::Module::<T>::block_number() + T::BountyUpdatePeriod::get();
-						bounty.status = BountyStatus::Active { curator: curator.clone(), update_due };
-
-						Ok(())
-					},
-					_ => Err(Error::<T, I>::UnexpectedStatus.into()),
-				}
-			})?;
-		}
-
-		/// Award bounty to a beneficiary account. The beneficiary will be able to claim the funds after a delay.
-		///
-		/// The dispatch origin for this call must be the curator of this bounty.
-		///
-		/// - `bounty_id`: Bounty ID to award.
-		/// - `beneficiary`: The beneficiary account whom will receive the payout.
-		#[weight = T::WeightInfo::award_bounty()]
-		fn award_bounty(origin, #[compact] bounty_id: ProposalIndex, beneficiary: <T::Lookup as StaticLookup>::Source) {
-			let signer = ensure_signed(origin)?;
-			let beneficiary = T::Lookup::lookup(beneficiary)?;
-
-			Bounties::<T, I>::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult {
-				let mut bounty = maybe_bounty.as_mut().ok_or(Error::<T, I>::InvalidIndex)?;
-				match &bounty.status {
-					BountyStatus::Active {
-						curator,
-						..
- } => { - ensure!(signer == *curator, Error::::RequireCurator); - }, - _ => return Err(Error::::UnexpectedStatus.into()), - } - bounty.status = BountyStatus::PendingPayout { - curator: signer, - beneficiary: beneficiary.clone(), - unlock_at: system::Module::::block_number() + T::BountyDepositPayoutDelay::get(), - }; - - Ok(()) - })?; - - Self::deposit_event(Event::::BountyAwarded(bounty_id, beneficiary)); - } - - /// Claim the payout from an awarded bounty after payout delay. - /// - /// The dispatch origin for this call must be the beneficiary of this bounty. - /// - /// - `bounty_id`: Bounty ID to claim. - #[weight = T::WeightInfo::claim_bounty()] - fn claim_bounty(origin, #[compact] bounty_id: BountyIndex) { - let _ = ensure_signed(origin)?; // anyone can trigger claim - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let bounty = maybe_bounty.take().ok_or(Error::::InvalidIndex)?; - if let BountyStatus::PendingPayout { curator, beneficiary, unlock_at } = bounty.status { - ensure!(system::Module::::block_number() >= unlock_at, Error::::Premature); - let bounty_account = Self::bounty_account_id(bounty_id); - let balance = T::Currency::free_balance(&bounty_account); - let fee = bounty.fee.min(balance); // just to be safe - let payout = balance.saturating_sub(fee); - let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); - let _ = T::Currency::transfer(&bounty_account, &curator, fee, AllowDeath); // should not fail - let _ = T::Currency::transfer(&bounty_account, &beneficiary, payout, AllowDeath); // should not fail - *maybe_bounty = None; - - BountyDescriptions::::remove(bounty_id); - - Self::deposit_event(Event::::BountyClaimed(bounty_id, payout, beneficiary)); - Ok(()) - } else { - Err(Error::::UnexpectedStatus.into()) - } - })?; - } - - /// Cancel a proposed or active bounty. All the funds will be sent to treasury and - /// the curator deposit will be unreserved if possible. - /// - /// Only `T::RejectOrigin` is able to cancel a bounty. - /// - /// - `bounty_id`: Bounty ID to cancel. - #[weight = T::WeightInfo::close_bounty_proposed().max(T::WeightInfo::close_bounty_active())] - fn close_bounty(origin, #[compact] bounty_id: BountyIndex) -> DispatchResultWithPostInfo { - T::RejectOrigin::ensure_origin(origin)?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResultWithPostInfo { - let bounty = maybe_bounty.as_ref().ok_or(Error::::InvalidIndex)?; - - match &bounty.status { - BountyStatus::Proposed => { - // The reject origin would like to cancel a proposed bounty. - BountyDescriptions::::remove(bounty_id); - let value = bounty.bond; - let imbalance = T::Currency::slash_reserved(&bounty.proposer, value).0; - T::OnSlash::on_unbalanced(imbalance); - *maybe_bounty = None; - - Self::deposit_event(Event::::BountyRejected(bounty_id, value)); - // Return early, nothing else to do. - return Ok(Some(T::WeightInfo::close_bounty_proposed()).into()) - }, - BountyStatus::Approved => { - // For weight reasons, we don't allow a council to cancel in this phase. - // We ask for them to wait until it is funded before they can cancel. - return Err(Error::::UnexpectedStatus.into()) - }, - BountyStatus::Funded | - BountyStatus::CuratorProposed { .. } => { - // Nothing extra to do besides the removal of the bounty below. - }, - BountyStatus::Active { curator, .. } => { - // Cancelled by council, refund deposit of the working curator. 
- let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); - // Then execute removal of the bounty below. - }, - BountyStatus::PendingPayout { .. } => { - // Bounty is already pending payout. If council wants to cancel - // this bounty, it should mean the curator was acting maliciously. - // So the council should first unassign the curator, slashing their - // deposit. - return Err(Error::::PendingPayout.into()) - } - } - - let bounty_account = Self::bounty_account_id(bounty_id); - - BountyDescriptions::::remove(bounty_id); - - let balance = T::Currency::free_balance(&bounty_account); - let _ = T::Currency::transfer(&bounty_account, &Self::account_id(), balance, AllowDeath); // should not fail - *maybe_bounty = None; - - Self::deposit_event(Event::::BountyCanceled(bounty_id)); - Ok(Some(T::WeightInfo::close_bounty_active()).into()) - }) - } - - /// Extend the expiry time of an active bounty. - /// - /// The dispatch origin for this call must be the curator of this bounty. - /// - /// - `bounty_id`: Bounty ID to extend. - /// - `remark`: additional information. - #[weight = T::WeightInfo::extend_bounty_expiry()] - fn extend_bounty_expiry(origin, #[compact] bounty_id: BountyIndex, _remark: Vec) { - let signer = ensure_signed(origin)?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - - match bounty.status { - BountyStatus::Active { ref curator, ref mut update_due } => { - ensure!(*curator == signer, Error::::RequireCurator); - *update_due = (system::Module::::block_number() + T::BountyUpdatePeriod::get()).max(*update_due); - }, - _ => return Err(Error::::UnexpectedStatus.into()), - } - - Ok(()) - })?; - - Self::deposit_event(Event::::BountyExtended(bounty_id)); - } - /// # /// - Complexity: `O(A)` where `A` is the number of approvals /// - Db reads and writes: `Approvals`, `pot account data` @@ -1159,7 +341,7 @@ decl_module! { } } -impl, I: Instance> Module { +impl, I: Instance> Module { // Add public immutables and private mutables. /// The account ID of the treasury pot. @@ -1170,93 +352,13 @@ impl, I: Instance> Module { T::ModuleId::get().into_account() } - /// The account ID of a bounty account - pub fn bounty_account_id(id: BountyIndex) -> T::AccountId { - // only use two byte prefix to support 16 byte account id (used by test) - // "modl" ++ "py/trsry" ++ "bt" is 14 bytes, and two bytes remaining for bounty index - T::ModuleId::get().into_sub_account(("bt", id)) - } - /// The needed bond for a proposal whose spend is `value`. fn calculate_bond(value: BalanceOf) -> BalanceOf { T::ProposalBondMinimum::get().max(T::ProposalBond::get() * value) } - /// Given a mutable reference to an `OpenTip`, insert the tip into it and check whether it - /// closes, if so, then deposit the relevant event and set closing accordingly. - /// - /// `O(T)` and one storage access. 
- fn insert_tip_and_check_closing( - tip: &mut OpenTip, T::BlockNumber, T::Hash>, - tipper: T::AccountId, - tip_value: BalanceOf, - ) -> bool { - match tip.tips.binary_search_by_key(&&tipper, |x| &x.0) { - Ok(pos) => tip.tips[pos] = (tipper, tip_value), - Err(pos) => tip.tips.insert(pos, (tipper, tip_value)), - } - Self::retain_active_tips(&mut tip.tips); - let threshold = (T::Tippers::count() + 1) / 2; - if tip.tips.len() >= threshold && tip.closes.is_none() { - tip.closes = Some(system::Module::::block_number() + T::TipCountdown::get()); - true - } else { - false - } - } - - /// Remove any non-members of `Tippers` from a `tips` vector. `O(T)`. - fn retain_active_tips(tips: &mut Vec<(T::AccountId, BalanceOf)>) { - let members = T::Tippers::sorted_members(); - let mut members_iter = members.iter(); - let mut member = members_iter.next(); - tips.retain(|(ref a, _)| loop { - match member { - None => break false, - Some(m) if m > a => break false, - Some(m) => { - member = members_iter.next(); - if m < a { - continue - } else { - break true; - } - } - } - }); - } - - /// Execute the payout of a tip. - /// - /// Up to three balance operations. - /// Plus `O(T)` (`T` is Tippers length). - fn payout_tip(hash: T::Hash, tip: OpenTip, T::BlockNumber, T::Hash>) { - let mut tips = tip.tips; - Self::retain_active_tips(&mut tips); - tips.sort_by_key(|i| i.1); - let treasury = Self::account_id(); - let max_payout = Self::pot(); - let mut payout = tips[tips.len() / 2].1.min(max_payout); - if !tip.deposit.is_zero() { - let _ = T::Currency::unreserve(&tip.finder, tip.deposit); - } - if tip.finders_fee { - if tip.finder != tip.who { - // pay out the finder's fee. - let finders_fee = T::TipFindersFee::get() * payout; - payout -= finders_fee; - // this should go through given we checked it's at most the free balance, but still - // we only make a best-effort. - let _ = T::Currency::transfer(&treasury, &tip.finder, finders_fee, KeepAlive); - } - } - // same as above: best-effort only. - let _ = T::Currency::transfer(&treasury, &tip.who, payout, KeepAlive); - Self::deposit_event(RawEvent::TipClosed(hash, tip.who, payout)); - } - /// Spend some money! returns number of approvals before spend. - fn spend_funds() -> Weight { + pub fn spend_funds() -> Weight { let mut total_weight: Weight = Zero::zero(); let mut budget_remaining = Self::pot(); @@ -1295,38 +397,8 @@ impl, I: Instance> Module { total_weight += T::WeightInfo::on_initialize_proposals(proposals_len); - let bounties_len = BountyApprovals::::mutate(|v| { - let bounties_approval_len = v.len() as u32; - v.retain(|&index| { - Bounties::::mutate(index, |bounty| { - // Should always be true, but shouldn't panic if false or we're screwed. - if let Some(bounty) = bounty { - if bounty.value <= budget_remaining { - budget_remaining -= bounty.value; - - bounty.status = BountyStatus::Funded; - - // return their deposit. - let _ = T::Currency::unreserve(&bounty.proposer, bounty.bond); - - // fund the bounty account - imbalance.subsume(T::Currency::deposit_creating(&Self::bounty_account_id(index), bounty.value)); - - Self::deposit_event(RawEvent::BountyBecameActive(index)); - false - } else { - missed_any = true; - true - } - } else { - false - } - }) - }); - bounties_approval_len - }); - - total_weight += T::WeightInfo::on_initialize_bounties(bounties_len); + // Call Runtime hooks to external pallet using treasury to compute spend funds. 
+ T::SpendFunds::spend_funds( &mut budget_remaining, &mut imbalance, &mut total_weight, &mut missed_any); if !missed_any { // burn some proportion of the remaining budget if we run a surplus. @@ -1361,98 +433,15 @@ impl, I: Instance> Module { /// Return the amount of money in the pot. // The existential deposit is not part of the pot so treasury account never gets deleted. - fn pot() -> BalanceOf { + pub fn pot() -> BalanceOf { T::Currency::free_balance(&Self::account_id()) // Must never be less than 0 but better be safe. .saturating_sub(T::Currency::minimum_balance()) } - fn create_bounty( - proposer: T::AccountId, - description: Vec, - value: BalanceOf, - ) -> DispatchResult { - ensure!(description.len() <= T::MaximumReasonLength::get() as usize, Error::::ReasonTooBig); - ensure!(value >= T::BountyValueMinimum::get(), Error::::InvalidValue); - - let index = Self::bounty_count(); - - // reserve deposit for new bounty - let bond = T::BountyDepositBase::get() - + T::DataDepositPerByte::get() * (description.len() as u32).into(); - T::Currency::reserve(&proposer, bond) - .map_err(|_| Error::::InsufficientProposersBalance)?; - - BountyCount::::put(index + 1); - - let bounty = Bounty { - proposer, - value, - fee: 0u32.into(), - curator_deposit: 0u32.into(), - bond, - status: BountyStatus::Proposed, - }; - - Bounties::::insert(index, &bounty); - BountyDescriptions::::insert(index, description); - - Self::deposit_event(RawEvent::BountyProposed(index)); - - Ok(()) - } - - pub fn migrate_retract_tip_for_tip_new() { - /// An open tipping "motion". Retains all details of a tip including information on the finder - /// and the members who have voted. - #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] - pub struct OldOpenTip< - AccountId: Parameter, - Balance: Parameter, - BlockNumber: Parameter, - Hash: Parameter, - > { - /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be - /// sensible. - reason: Hash, - /// The account to be tipped. - who: AccountId, - /// The account who began this tip and the amount held on deposit. - finder: Option<(AccountId, Balance)>, - /// The block number at which this tip will close if `Some`. If `None`, then no closing is - /// scheduled. - closes: Option, - /// The members who have voted for this tip. Sorted by AccountId. 
- tips: Vec<(AccountId, Balance)>, - } - - use frame_support::{Twox64Concat, migration::StorageKeyIterator}; - - for (hash, old_tip) in StorageKeyIterator::< - T::Hash, - OldOpenTip, T::BlockNumber, T::Hash>, - Twox64Concat, - >::new(I::PREFIX.as_bytes(), b"Tips").drain() - { - let (finder, deposit, finders_fee) = match old_tip.finder { - Some((finder, deposit)) => (finder, deposit, true), - None => (T::AccountId::default(), Zero::zero(), false), - }; - let new_tip = OpenTip { - reason: old_tip.reason, - who: old_tip.who, - finder, - deposit, - closes: old_tip.closes, - tips: old_tip.tips, - finders_fee - }; - Tips::::insert(hash, new_tip) - } - } } -impl, I: Instance> OnUnbalanced> for Module { +impl, I: Instance> OnUnbalanced> for Module { fn on_nonzero_unbalanced(amount: NegativeImbalanceOf) { let numeric_amount = amount.peek(); diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index 88c4f23b91ae24911707f103b1fd34b2463614ce..8db303a426d01102c144ecfe57b186335d9350c8 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -22,21 +22,22 @@ use super::*; use std::cell::RefCell; use frame_support::{ - assert_noop, assert_ok, impl_outer_origin, impl_outer_event, parameter_types, weights::Weight, - traits::{Contains, OnInitialize} + assert_noop, assert_ok, impl_outer_origin, impl_outer_event, parameter_types, + traits::{OnInitialize} }; +use frame_system::{self as system}; + use sp_core::H256; use sp_runtime::{ - Perbill, ModuleId, + ModuleId, testing::Header, - traits::{BlakeTwo256, IdentityLookup, BadOrigin}, + traits::{BlakeTwo256, IdentityLookup}, }; impl_outer_origin! { pub enum Origin for Test where system = frame_system {} } - mod treasury { // Re-export needed for `impl_outer_event!`. pub use super::super::*; @@ -50,17 +51,18 @@ impl_outer_event! { } } - #[derive(Clone, Eq, PartialEq)] pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -72,13 +74,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -89,7 +84,7 @@ impl frame_system::Trait for Test { parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type Event = Event; @@ -101,69 +96,30 @@ impl pallet_balances::Trait for Test { thread_local! 
{ static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); } -pub struct TenToFourteen; -impl Contains for TenToFourteen { - fn sorted_members() -> Vec { - TEN_TO_FOURTEEN.with(|v| { - v.borrow().clone() - }) - } - #[cfg(feature = "runtime-benchmarks")] - fn add(new: &u128) { - TEN_TO_FOURTEEN.with(|v| { - let mut members = v.borrow_mut(); - members.push(*new); - members.sort(); - }) - } -} -impl ContainsLengthBound for TenToFourteen { - fn max_len() -> usize { - TEN_TO_FOURTEEN.with(|v| v.borrow().len()) - } - fn min_len() -> usize { 0 } -} parameter_types! { pub const ProposalBond: Permill = Permill::from_percent(5); pub const ProposalBondMinimum: u64 = 1; pub const SpendPeriod: u64 = 2; pub const Burn: Permill = Permill::from_percent(50); - pub const TipCountdown: u64 = 1; - pub const TipFindersFee: Percent = Percent::from_percent(20); - pub const TipReportDepositBase: u64 = 1; - pub const DataDepositPerByte: u64 = 1; - pub const BountyDepositBase: u64 = 80; - pub const BountyDepositPayoutDelay: u64 = 3; pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); pub const BountyUpdatePeriod: u32 = 20; - pub const MaximumReasonLength: u32 = 16384; pub const BountyCuratorDeposit: Permill = Permill::from_percent(50); pub const BountyValueMinimum: u64 = 1; } -impl Trait for Test { +impl Config for Test { type ModuleId = TreasuryModuleId; type Currency = pallet_balances::Module; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; - type Tippers = TenToFourteen; - type TipCountdown = TipCountdown; - type TipFindersFee = TipFindersFee; - type TipReportDepositBase = TipReportDepositBase; - type DataDepositPerByte = DataDepositPerByte; type Event = Event; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; - type BountyDepositBase = BountyDepositBase; - type BountyDepositPayoutDelay = BountyDepositPayoutDelay; - type BountyUpdatePeriod = BountyUpdatePeriod; - type BountyCuratorDeposit = BountyCuratorDeposit; - type BountyValueMinimum = BountyValueMinimum; - type MaximumReasonLength = MaximumReasonLength; type BurnDestination = (); // Just gets burned. 
type WeightInfo = (); + type SpendFunds = (); } type System = frame_system::Module; type Balances = pallet_balances::Module; @@ -179,15 +135,6 @@ pub fn new_test_ext() -> sp_io::TestExternalities { t.into() } -fn last_event() -> RawEvent { - System::events().into_iter().map(|r| r.event) - .filter_map(|e| { - if let Event::treasury(inner) = e { Some(inner) } else { None } - }) - .last() - .unwrap() -} - #[test] fn genesis_config_works() { new_test_ext().execute_with(|| { @@ -196,163 +143,6 @@ fn genesis_config_works() { }); } -fn tip_hash() -> H256 { - BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 3u128)) -} - -#[test] -fn tip_new_cannot_be_used_twice() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); - assert_noop!( - Treasury::tip_new(Origin::signed(11), b"awesome.dot".to_vec(), 3, 10), - Error::::AlreadyKnown - ); - }); -} - -#[test] -fn report_awesome_and_tip_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); - assert_eq!(Balances::reserved_balance(0), 12); - assert_eq!(Balances::free_balance(0), 88); - - // other reports don't count. - assert_noop!( - Treasury::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3), - Error::::AlreadyKnown - ); - - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::tip(Origin::signed(9), h.clone(), 10), BadOrigin); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(100), h.into())); - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 102); - assert_eq!(Balances::free_balance(3), 8); - }); -} - -#[test] -fn report_awesome_from_beneficiary_and_tip_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 0)); - assert_eq!(Balances::reserved_balance(0), 12); - assert_eq!(Balances::free_balance(0), 88); - let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u128)); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(100), h.into())); - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 110); - }); -} - -#[test] -fn close_tip_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); - - let h = tip_hash(); - - assert_eq!(last_event(), RawEvent::NewTip(h)); - - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::StillOpen); - - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - - assert_eq!(last_event(), RawEvent::TipClosing(h)); - - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::Premature); - - 
System::set_block_number(2); - assert_noop!(Treasury::close_tip(Origin::none(), h.into()), BadOrigin); - assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); - assert_eq!(Balances::free_balance(3), 10); - - assert_eq!(last_event(), RawEvent::TipClosed(h, 3, 10)); - - assert_noop!(Treasury::close_tip(Origin::signed(100), h.into()), Error::::UnknownTip); - }); -} - -#[test] -fn retract_tip_works() { - new_test_ext().execute_with(|| { - // with report awesome - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::retract_tip(Origin::signed(10), h.clone()), Error::::NotFinder); - assert_ok!(Treasury::retract_tip(Origin::signed(0), h.clone())); - System::set_block_number(2); - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); - - // with tip new - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::retract_tip(Origin::signed(0), h.clone()), Error::::NotFinder); - assert_ok!(Treasury::retract_tip(Origin::signed(10), h.clone())); - System::set_block_number(2); - assert_noop!(Treasury::close_tip(Origin::signed(10), h.into()), Error::::UnknownTip); - }); -} - -#[test] -fn tip_median_calculation_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 0)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000000)); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); - assert_eq!(Balances::free_balance(3), 10); - }); -} - -#[test] -fn tip_changing_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10000)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10000)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10000)); - assert_ok!(Treasury::tip(Origin::signed(13), h.clone(), 0)); - assert_ok!(Treasury::tip(Origin::signed(14), h.clone(), 0)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 100)); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); - assert_eq!(Balances::free_balance(3), 10); - }); -} - #[test] fn minting_works() { new_test_ext().execute_with(|| { @@ -559,596 +349,6 @@ fn inexistent_account_works() { }); } -#[test] -fn propose_bounty_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 10, b"1234567890".to_vec())); - - assert_eq!(last_event(), 
RawEvent::BountyProposed(0)); - - let deposit: u64 = 85 + 5; - assert_eq!(Balances::reserved_balance(0), deposit); - assert_eq!(Balances::free_balance(0), 100 - deposit); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - curator_deposit: 0, - value: 10, - bond: deposit, - status: BountyStatus::Proposed, - }); - - assert_eq!(Treasury::bounty_descriptions(0).unwrap(), b"1234567890".to_vec()); - - assert_eq!(Treasury::bounty_count(), 1); - }); -} - -#[test] -fn propose_bounty_validation_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - - assert_noop!( - Treasury::propose_bounty(Origin::signed(1), 0, [0; 17_000].to_vec()), - Error::::ReasonTooBig - ); - - assert_noop!( - Treasury::propose_bounty(Origin::signed(1), 10, b"12345678901234567890".to_vec()), - Error::::InsufficientProposersBalance - ); - - assert_noop!( - Treasury::propose_bounty(Origin::signed(1), 0, b"12345678901234567890".to_vec()), - Error::::InvalidValue - ); - }); -} - -#[test] -fn close_bounty_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_noop!(Treasury::close_bounty(Origin::root(), 0), Error::::InvalidIndex); - - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 10, b"12345".to_vec())); - - assert_ok!(Treasury::close_bounty(Origin::root(), 0)); - - let deposit: u64 = 80 + 5; - - assert_eq!(last_event(), RawEvent::BountyRejected(0, deposit)); - - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 100 - deposit); - - assert_eq!(Treasury::bounties(0), None); - assert!(!Bounties::::contains_key(0)); - assert_eq!(Treasury::bounty_descriptions(0), None); - }); -} - -#[test] -fn approve_bounty_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_noop!(Treasury::approve_bounty(Origin::root(), 0), Error::::InvalidIndex); - - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - let deposit: u64 = 80 + 5; - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - value: 50, - curator_deposit: 0, - bond: deposit, - status: BountyStatus::Approved, - }); - assert_eq!(Treasury::bounty_approvals(), vec![0]); - - assert_noop!(Treasury::close_bounty(Origin::root(), 0), Error::::UnexpectedStatus); - - // deposit not returned yet - assert_eq!(Balances::reserved_balance(0), deposit); - assert_eq!(Balances::free_balance(0), 100 - deposit); - - >::on_initialize(2); - - // return deposit - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 100); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - curator_deposit: 0, - value: 50, - bond: deposit, - status: BountyStatus::Funded, - }); - assert_eq!(Treasury::pot(), 100 - 50 - 25); // burn 25 - assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 50); - }); -} - -#[test] -fn assign_curator_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_noop!(Treasury::propose_curator(Origin::root(), 0, 4, 4), Error::::InvalidIndex); - - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - 
assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_noop!(Treasury::propose_curator(Origin::root(), 0, 4, 50), Error::::InvalidFee); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::CuratorProposed { - curator: 4, - }, - }); - - assert_noop!(Treasury::accept_curator(Origin::signed(1), 0), Error::::RequireCurator); - assert_noop!(Treasury::accept_curator(Origin::signed(4), 0), pallet_balances::Error::::InsufficientBalance); - - Balances::make_free_balance_be(&4, 10); - - assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 2, - value: 50, - bond: 85, - status: BountyStatus::Active { - curator: 4, - update_due: 22, - }, - }); - - assert_eq!(Balances::free_balance(&4), 8); - assert_eq!(Balances::reserved_balance(&4), 2); - }); -} - -#[test] -fn unassign_curator_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); - - assert_noop!(Treasury::unassign_curator(Origin::signed(1), 0), BadOrigin); - - assert_ok!(Treasury::unassign_curator(Origin::signed(4), 0)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); - - Balances::make_free_balance_be(&4, 10); - - assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); - - assert_ok!(Treasury::unassign_curator(Origin::root(), 0)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); - - assert_eq!(Balances::free_balance(&4), 8); - assert_eq!(Balances::reserved_balance(&4), 0); // slashed 2 - }); -} - -#[test] -fn award_and_claim_bounty_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - Balances::make_free_balance_be(&4, 10); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); - assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); - - assert_eq!(Balances::free_balance(4), 8); // inital 10 - 2 deposit - - assert_noop!(Treasury::award_bounty(Origin::signed(1), 0, 3), Error::::RequireCurator); - - assert_ok!(Treasury::award_bounty(Origin::signed(4), 0, 3)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 2, - value: 50, - bond: 85, - status: BountyStatus::PendingPayout { - curator: 4, - beneficiary: 3, - unlock_at: 5 - }, - }); - - assert_noop!(Treasury::claim_bounty(Origin::signed(1), 0), Error::::Premature); - - System::set_block_number(5); - >::on_initialize(5); - - 
assert_ok!(Balances::transfer(Origin::signed(0), Treasury::bounty_account_id(0), 10)); - - assert_ok!(Treasury::claim_bounty(Origin::signed(1), 0)); - - assert_eq!(last_event(), RawEvent::BountyClaimed(0, 56, 3)); - - assert_eq!(Balances::free_balance(4), 14); // initial 10 + fee 4 - assert_eq!(Balances::free_balance(3), 56); - assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 0); - - assert_eq!(Treasury::bounties(0), None); - assert_eq!(Treasury::bounty_descriptions(0), None); - }); -} - -#[test] -fn claim_handles_high_fee() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - Balances::make_free_balance_be(&4, 30); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 49)); - assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); - - assert_ok!(Treasury::award_bounty(Origin::signed(4), 0, 3)); - - System::set_block_number(5); - >::on_initialize(5); - - // make fee > balance - let _ = Balances::slash(&Treasury::bounty_account_id(0), 10); - - assert_ok!(Treasury::claim_bounty(Origin::signed(1), 0)); - - assert_eq!(last_event(), RawEvent::BountyClaimed(0, 0, 3)); - - assert_eq!(Balances::free_balance(4), 70); // 30 + 50 - 10 - assert_eq!(Balances::free_balance(3), 0); - assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 0); - - assert_eq!(Treasury::bounties(0), None); - assert_eq!(Treasury::bounty_descriptions(0), None); - }); -} - -#[test] -fn cancel_and_refund() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Balances::transfer(Origin::signed(0), Treasury::bounty_account_id(0), 10)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); - - assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 60); - - assert_noop!(Treasury::close_bounty(Origin::signed(0), 0), BadOrigin); - - assert_ok!(Treasury::close_bounty(Origin::root(), 0)); - - assert_eq!(Treasury::pot(), 85); // - 25 + 10 - }); -} - -#[test] -fn award_and_cancel() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 0, 10)); - assert_ok!(Treasury::accept_curator(Origin::signed(0), 0)); - - assert_eq!(Balances::free_balance(0), 95); - assert_eq!(Balances::reserved_balance(0), 5); - - assert_ok!(Treasury::award_bounty(Origin::signed(0), 0, 3)); - - // Cannot close bounty directly when payout is happening... - assert_noop!(Treasury::close_bounty(Origin::root(), 0), Error::::PendingPayout); - - // Instead unassign the curator to slash them and then close. 
- assert_ok!(Treasury::unassign_curator(Origin::root(), 0)); - assert_ok!(Treasury::close_bounty(Origin::root(), 0)); - - assert_eq!(last_event(), RawEvent::BountyCanceled(0)); - - assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 0); - // Slashed. - assert_eq!(Balances::free_balance(0), 95); - assert_eq!(Balances::reserved_balance(0), 0); - - assert_eq!(Treasury::bounties(0), None); - assert_eq!(Treasury::bounty_descriptions(0), None); - }); -} - -#[test] -fn expire_and_unassign() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 1, 10)); - assert_ok!(Treasury::accept_curator(Origin::signed(1), 0)); - - assert_eq!(Balances::free_balance(1), 93); - assert_eq!(Balances::reserved_balance(1), 5); - - System::set_block_number(22); - >::on_initialize(22); - - assert_noop!(Treasury::unassign_curator(Origin::signed(0), 0), Error::::Premature); - - System::set_block_number(23); - >::on_initialize(23); - - assert_ok!(Treasury::unassign_curator(Origin::signed(0), 0)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 10, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); - - assert_eq!(Balances::free_balance(1), 93); - assert_eq!(Balances::reserved_balance(1), 0); // slashed - - }); -} - -#[test] -fn extend_expiry() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - Balances::make_free_balance_be(&4, 10); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - assert_noop!(Treasury::extend_bounty_expiry(Origin::signed(1), 0, Vec::new()), Error::::UnexpectedStatus); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 10)); - assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); - - assert_eq!(Balances::free_balance(4), 5); - assert_eq!(Balances::reserved_balance(4), 5); - - System::set_block_number(10); - >::on_initialize(10); - - assert_noop!(Treasury::extend_bounty_expiry(Origin::signed(0), 0, Vec::new()), Error::::RequireCurator); - assert_ok!(Treasury::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 10, - curator_deposit: 5, - value: 50, - bond: 85, - status: BountyStatus::Active { curator: 4, update_due: 30 }, - }); - - assert_ok!(Treasury::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 10, - curator_deposit: 5, - value: 50, - bond: 85, - status: BountyStatus::Active { curator: 4, update_due: 30 }, // still the same - }); - - System::set_block_number(25); - >::on_initialize(25); - - assert_noop!(Treasury::unassign_curator(Origin::signed(0), 0), Error::::Premature); - assert_ok!(Treasury::unassign_curator(Origin::signed(4), 0)); - - assert_eq!(Balances::free_balance(4), 10); // not slashed - assert_eq!(Balances::reserved_balance(4), 0); - }); -} - -#[test] -fn test_last_reward_migration() { - use sp_storage::Storage; - - let mut s = Storage::default(); - - 
#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] - pub struct OldOpenTip< - AccountId: Parameter, - Balance: Parameter, - BlockNumber: Parameter, - Hash: Parameter, - > { - /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be - /// sensible. - reason: Hash, - /// The account to be tipped. - who: AccountId, - /// The account who began this tip and the amount held on deposit. - finder: Option<(AccountId, Balance)>, - /// The block number at which this tip will close if `Some`. If `None`, then no closing is - /// scheduled. - closes: Option, - /// The members who have voted for this tip. Sorted by AccountId. - tips: Vec<(AccountId, Balance)>, - } - - let reason1 = BlakeTwo256::hash(b"reason1"); - let hash1 = BlakeTwo256::hash_of(&(reason1, 10u64)); - - let old_tip_finder = OldOpenTip:: { - reason: reason1, - who: 10, - finder: Some((20, 30)), - closes: Some(13), - tips: vec![(40, 50), (60, 70)] - }; - - let reason2 = BlakeTwo256::hash(b"reason2"); - let hash2 = BlakeTwo256::hash_of(&(reason2, 20u64)); - - let old_tip_no_finder = OldOpenTip:: { - reason: reason2, - who: 20, - finder: None, - closes: Some(13), - tips: vec![(40, 50), (60, 70)] - }; - - let data = vec![ - ( - Tips::::hashed_key_for(hash1), - old_tip_finder.encode().to_vec() - ), - ( - Tips::::hashed_key_for(hash2), - old_tip_no_finder.encode().to_vec() - ), - ]; - - s.top = data.into_iter().collect(); - sp_io::TestExternalities::new(s).execute_with(|| { - Treasury::migrate_retract_tip_for_tip_new(); - - // Test w/ finder - assert_eq!( - Tips::::get(hash1), - Some(OpenTip { - reason: reason1, - who: 10, - finder: 20, - deposit: 30, - closes: Some(13), - tips: vec![(40, 50), (60, 70)], - finders_fee: true, - }) - ); - - // Test w/o finder - assert_eq!( - Tips::::get(hash2), - Some(OpenTip { - reason: reason2, - who: 20, - finder: Default::default(), - deposit: 0, - closes: Some(13), - tips: vec![(40, 50), (60, 70)], - finders_fee: false, - }) - ); - }); -} - #[test] fn genesis_funding_works() { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); diff --git a/frame/treasury/src/weights.rs b/frame/treasury/src/weights.rs index 646b9869f47efceb8f8c83917a322741b752511e..3bc1fcd2308718133498300f1a8598d300512cd7 100644 --- a/frame/treasury/src/weights.rs +++ b/frame/treasury/src/weights.rs @@ -15,13 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for pallet_treasury +//! Autogenerated weights for pallet_treasury +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-27, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-16, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: -// target/release/substrate +// ./target/release/substrate // benchmark // --chain=dev // --steps=50 @@ -46,293 +47,62 @@ pub trait WeightInfo { fn propose_spend() -> Weight; fn reject_proposal() -> Weight; fn approve_proposal() -> Weight; - fn report_awesome(r: u32, ) -> Weight; - fn retract_tip() -> Weight; - fn tip_new(r: u32, t: u32, ) -> Weight; - fn tip(t: u32, ) -> Weight; - fn close_tip(t: u32, ) -> Weight; - fn propose_bounty(d: u32, ) -> Weight; - fn approve_bounty() -> Weight; - fn propose_curator() -> Weight; - fn unassign_curator() -> Weight; - fn accept_curator() -> Weight; - fn award_bounty() -> Weight; - fn claim_bounty() -> Weight; - fn close_bounty_proposed() -> Weight; - fn close_bounty_active() -> Weight; - fn extend_bounty_expiry() -> Weight; fn on_initialize_proposals(p: u32, ) -> Weight; - fn on_initialize_bounties(b: u32, ) -> Weight; - } /// Weights for pallet_treasury using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn propose_spend() -> Weight { - (56_844_000 as Weight) + (59_986_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn reject_proposal() -> Weight { - (46_098_000 as Weight) + (48_300_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } fn approve_proposal() -> Weight { - (13_622_000 as Weight) + (14_054_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn report_awesome(r: u32, ) -> Weight { - (71_823_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn retract_tip() -> Weight { - (60_150_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn tip_new(r: u32, t: u32, ) -> Weight { - (46_522_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((145_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn tip(t: u32, ) -> Weight { - (33_790_000 as Weight) - .saturating_add((713_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn close_tip(t: u32, ) -> Weight { - (113_040_000 as Weight) - .saturating_add((375_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - - } - fn propose_bounty(d: u32, ) -> Weight { - (60_887_000 as Weight) - .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - - } - fn approve_bounty() -> Weight { - (17_337_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn propose_curator() -> Weight { - (14_068_000 as Weight) - 
.saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn unassign_curator() -> Weight { - (49_717_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn accept_curator() -> Weight { - (50_596_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - - } - fn award_bounty() -> Weight { - (36_030_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - - } - fn claim_bounty() -> Weight { - (167_088_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) - - } - fn close_bounty_proposed() -> Weight { - (48_977_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - - } - fn close_bounty_active() -> Weight { - (110_959_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - - } - fn extend_bounty_expiry() -> Weight { - (34_987_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } fn on_initialize_proposals(p: u32, ) -> Weight { - (76_596_000 as Weight) - .saturating_add((73_988_000 as Weight).saturating_mul(p as Weight)) + (86_038_000 as Weight) + // Standard Error: 18_000 + .saturating_add((78_781_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) } - fn on_initialize_bounties(b: u32, ) -> Weight { - (75_165_000 as Weight) - .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) - } - } // For backwards compatibility and tests impl WeightInfo for () { fn propose_spend() -> Weight { - (56_844_000 as Weight) + (59_986_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn reject_proposal() -> Weight { - (46_098_000 as Weight) + (48_300_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - } fn approve_proposal() -> Weight { - (13_622_000 as Weight) + (14_054_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn report_awesome(r: u32, ) -> Weight { - (71_823_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn retract_tip() -> Weight { - (60_150_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn tip_new(r: u32, t: u32, ) -> Weight { - 
(46_522_000 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((145_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn tip(t: u32, ) -> Weight { - (33_790_000 as Weight) - .saturating_add((713_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn close_tip(t: u32, ) -> Weight { - (113_040_000 as Weight) - .saturating_add((375_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - - } - fn propose_bounty(d: u32, ) -> Weight { - (60_887_000 as Weight) - .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - - } - fn approve_bounty() -> Weight { - (17_337_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn propose_curator() -> Weight { - (14_068_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn unassign_curator() -> Weight { - (49_717_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn accept_curator() -> Weight { - (50_596_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - - } - fn award_bounty() -> Weight { - (36_030_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - - } - fn claim_bounty() -> Weight { - (167_088_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) - - } - fn close_bounty_proposed() -> Weight { - (48_977_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - - } - fn close_bounty_active() -> Weight { - (110_959_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - - } - fn extend_bounty_expiry() -> Weight { - (34_987_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } fn on_initialize_proposals(p: u32, ) -> Weight { - (76_596_000 as Weight) - .saturating_add((73_988_000 as Weight).saturating_mul(p as Weight)) + (86_038_000 as Weight) + // Standard Error: 18_000 + .saturating_add((78_781_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) } - fn on_initialize_bounties(b: u32, ) -> Weight { - (75_165_000 as Weight) - .saturating_add((73_634_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(b as 
Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) - } - } diff --git a/frame/utility/README.md b/frame/utility/README.md index 396396929118086dd12ba150d85eb240e3284de9..f7c0923cd5497b66a5a3f6f1f7a48750c2ae17f7 100644 --- a/frame/utility/README.md +++ b/frame/utility/README.md @@ -33,6 +33,6 @@ filtered by any proxy. * `as_derivative` - Dispatch a call from a derivative signed origin. [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs index 413ed66ac8498f4844d51ebceb81e60f1c0bbd07..501e1b293bcc1677d63829ae29b1b8eb7c2fcb4f 100644 --- a/frame/utility/src/benchmarking.rs +++ b/frame/utility/src/benchmarking.rs @@ -25,9 +25,9 @@ use frame_benchmarking::{benchmarks, account, whitelisted_caller}; const SEED: u32 = 0; -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::Event) { let events = frame_system::Module::::events(); - let system_event: ::Event = generic_event.into(); + let system_event: ::Event = generic_event.into(); // compare to the last event record let EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); @@ -38,7 +38,7 @@ benchmarks! { batch { let c in 0 .. 1000; - let mut calls: Vec<::Call> = Vec::new(); + let mut calls: Vec<::Call> = Vec::new(); for i in 0 .. c { let call = frame_system::Call::remark(vec![]).into(); calls.push(call); @@ -59,7 +59,7 @@ benchmarks! { batch_all { let c in 0 .. 1000; - let mut calls: Vec<::Call> = Vec::new(); + let mut calls: Vec<::Call> = Vec::new(); for i in 0 .. c { let call = frame_system::Call::remark(vec![]).into(); calls.push(call); diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index e7ff09c8f0db1202805a511941881c8f70f74d20..3aee32b250d5b9623e24fd054d7082dc8376f419 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -18,7 +18,7 @@ //! # Utility Module //! A stateless module with helpers for dispatch management which does no re-authentication. //! -//! - [`utility::Trait`](./trait.Trait.html) +//! - [`utility::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -50,7 +50,7 @@ //! * `as_derivative` - Dispatch a call from a derivative signed origin. //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! [`Config`]: ./trait.Config.html // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -74,9 +74,9 @@ use sp_runtime::{DispatchError, traits::Dispatchable}; pub use weights::WeightInfo; /// Configuration trait. -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From + Into<::Event>; + type Event: From + Into<::Event>; /// The overarching call type. type Call: Parameter + Dispatchable @@ -88,7 +88,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as Utility {} + trait Store for Module as Utility {} } decl_event! { @@ -111,7 +111,7 @@ impl TypeId for IndexedUtilityModuleId { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// Deposit one of this module's events by using the default implementation. 
fn deposit_event() = default; @@ -122,7 +122,7 @@ decl_module! { /// - `calls`: The calls to be dispatched from the same origin. /// /// If origin is root then call are dispatch without checking origin filter. (This includes - /// bypassing `frame_system::Trait::BaseCallFilter`). + /// bypassing `frame_system::Config::BaseCallFilter`). /// /// # /// - Complexity: O(C) where C is the number of calls to be batched. @@ -149,7 +149,7 @@ decl_module! { } }, )] - fn batch(origin, calls: Vec<::Call>) -> DispatchResultWithPostInfo { + fn batch(origin, calls: Vec<::Call>) -> DispatchResultWithPostInfo { let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); // Track the actual weight of each of the batch calls. @@ -197,7 +197,7 @@ decl_module! { .saturating_add(T::DbWeight::get().reads_writes(1, 1)), call.get_dispatch_info().class, )] - fn as_derivative(origin, index: u16, call: Box<::Call>) -> DispatchResultWithPostInfo { + fn as_derivative(origin, index: u16, call: Box<::Call>) -> DispatchResultWithPostInfo { let mut origin = origin; let who = ensure_signed(origin.clone())?; let pseudonym = Self::derivative_account_id(who, index); @@ -222,7 +222,7 @@ decl_module! { /// - `calls`: The calls to be dispatched from the same origin. /// /// If origin is root then call are dispatch without checking origin filter. (This includes - /// bypassing `frame_system::Trait::BaseCallFilter`). + /// bypassing `frame_system::Config::BaseCallFilter`). /// /// # /// - Complexity: O(C) where C is the number of calls to be batched. @@ -244,7 +244,7 @@ decl_module! { }, )] #[transactional] - fn batch_all(origin, calls: Vec<::Call>) -> DispatchResultWithPostInfo { + fn batch_all(origin, calls: Vec<::Call>) -> DispatchResultWithPostInfo { let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); // Track the actual weight of each of the batch calls. @@ -274,7 +274,7 @@ decl_module! { } } -impl Module { +impl Module { /// Derive a derivative account ID from the owner account and the sub-account index. pub fn derivative_account_id(who: T::AccountId, index: u16) -> T::AccountId { let entropy = (b"modlpy/utilisuba", who, index).using_encoded(blake2_256); diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index a3c33bdf2081f26d1963b6a0766bcb11d66cb751..95973a8823f5caef5f3cce9b56a6ceef6d9ea02a 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -30,17 +30,17 @@ use frame_support::{ storage, }; use sp_core::H256; -use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; +use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; use crate as utility; // example module to test behaviors. pub mod example { use super::*; use frame_support::dispatch::WithPostDispatchInfo; - pub trait Trait: frame_system::Trait { } + pub trait Config: frame_system::Config { } decl_module! { - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { #[weight = *weight] fn noop(_origin, weight: Weight) { } @@ -93,12 +93,14 @@ impl_outer_dispatch! { pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = Weight::max_value(); - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(Weight::max_value()); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = TestBaseCallFilter; + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -110,13 +112,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = TestEvent; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -127,7 +122,7 @@ impl frame_system::Trait for Test { parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); type Balance = u64; type DustRemoval = (); @@ -142,7 +137,7 @@ parameter_types! { pub const MaxSignatories: u16 = 3; } -impl example::Trait for Test {} +impl example::Config for Test {} pub struct TestBaseCallFilter; impl Filter for TestBaseCallFilter { @@ -158,7 +153,7 @@ impl Filter for TestBaseCallFilter { } } } -impl Trait for Test { +impl Config for Test { type Event = TestEvent; type Call = Call; type WeightInfo = (); @@ -350,6 +345,7 @@ fn batch_early_exit_works() { #[test] fn batch_weight_calculation_doesnt_overflow() { + use sp_runtime::Perbill; new_test_ext().execute_with(|| { let big_call = Call::System(SystemCall::fill_block(Perbill::from_percent(50))); assert_eq!(big_call.get_dispatch_info().weight, Weight::max_value() / 2); @@ -428,7 +424,7 @@ fn batch_handles_weight_refund() { assert_eq!( extract_actual_weight(&result, &info), // Real weight is 2 calls at end_weight - ::WeightInfo::batch(2) + end_weight * 2, + ::WeightInfo::batch(2) + end_weight * 2, ); }); } @@ -465,7 +461,7 @@ fn batch_all_revert() { ]), DispatchErrorWithPostInfo { post_info: PostDispatchInfo { - actual_weight: Some(::WeightInfo::batch_all(2) + info.weight * 2), + actual_weight: Some(::WeightInfo::batch_all(2) + info.weight * 2), pays_fee: Pays::Yes }, error: pallet_balances::Error::::InsufficientBalance.into() @@ -536,7 +532,7 @@ fn batch_all_handles_weight_refund() { assert_eq!( extract_actual_weight(&result, &info), // Real weight is 2 calls at end_weight - ::WeightInfo::batch_all(2) + end_weight * 2, + ::WeightInfo::batch_all(2) + end_weight * 2, ); }); } diff --git a/frame/utility/src/weights.rs b/frame/utility/src/weights.rs index 73e4e3b1d93b9614c89d5a46aa2da7d45a01306d..c03ef0d064b99ac3dc7dd57d171b909622cc6d79 100644 --- a/frame/utility/src/weights.rs +++ b/frame/utility/src/weights.rs @@ -51,7 +51,7 @@ pub trait WeightInfo { /// Weights for pallet_utility using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn batch(c: u32, ) -> Weight { (20_071_000 as Weight) .saturating_add((2_739_000 as Weight).saturating_mul(c as Weight)) diff --git a/frame/vesting/README.md b/frame/vesting/README.md index 921fa94a1a2a99a14eb6f80798f41b813d6fb7bd..811b0dc44152d31da954ed68e40e1251da82c8ca 100644 --- a/frame/vesting/README.md +++ b/frame/vesting/README.md @@ -26,6 +26,6 @@ This module implements the `VestingSchedule` trait. "vested" so far. [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index 652d10aab3ae4524d63bb563b42d313122859dcb..0cb030668d0504bef795848d026d685a6ca43e88 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -29,9 +29,9 @@ use crate::Module as Vesting; const SEED: u32 = 0; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -fn add_locks(who: &T::AccountId, n: u8) { +fn add_locks(who: &T::AccountId, n: u8) { for id in 0..n { let lock_id = [id; 8]; let locked = 100u32; @@ -40,7 +40,7 @@ fn add_locks(who: &T::AccountId, n: u8) { } } -fn add_vesting_schedule(who: &T::AccountId) -> Result<(), &'static str> { +fn add_vesting_schedule(who: &T::AccountId) -> Result<(), &'static str> { let locked = 100u32; let per_block = 10u32; let starting_block = 1u32; diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index c09516c2cc27c9d439df301675f302b6ed1d29ce..a7a8147a062f5c63296ba58ea5d694916bf3dc50 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -17,7 +17,7 @@ //! # Vesting Module //! -//! - [`vesting::Trait`](./trait.Trait.html) +//! - [`vesting::Config`](./trait.Config.html) //! - [`Call`](./enum.Call.html) //! //! ## Overview @@ -43,7 +43,7 @@ //! "vested" so far. //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! [`Config`]: ./trait.Config.html #![cfg_attr(not(feature = "std"), no_std)] @@ -64,12 +64,12 @@ use frame_support::traits::{ use frame_system::{ensure_signed, ensure_root}; pub use weights::WeightInfo; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type MaxLocksOf = <::Currency as LockableCurrency<::AccountId>>::MaxLocks; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type MaxLocksOf = <::Currency as LockableCurrency<::AccountId>>::MaxLocks; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The currency trait. type Currency: LockableCurrency; @@ -120,7 +120,7 @@ impl< } decl_storage! { - trait Store for Module as Vesting { + trait Store for Module as Vesting { /// Information regarding the vesting of a given account. pub Vesting get(fn vesting): map hasher(blake2_128_concat) T::AccountId @@ -156,7 +156,7 @@ decl_storage! { } decl_event!( - pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { + pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { /// The amount vested has been updated. This could indicate more funds are available. The /// balance given is the amount which is left unvested (and thus locked). /// \[account, unvested\] @@ -168,7 +168,7 @@ decl_event!( decl_error! 
{ /// Error for the vesting module. - pub enum Error for Module { + pub enum Error for Module { /// The account given is not vesting. NotVesting, /// An existing vesting schedule already exists for this account that cannot be clobbered. @@ -180,7 +180,7 @@ decl_error! { decl_module! { /// Vesting module declaration. - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; /// The minimum amount to be transferred to create a new vesting schedule. @@ -309,7 +309,7 @@ decl_module! { } } -impl Module { +impl Module { /// (Re)set or remove the module's currency lock on `who`'s account in accordance with their /// current unvested amount. fn update_lock(who: T::AccountId) -> DispatchResult { @@ -330,7 +330,7 @@ impl Module { } } -impl VestingSchedule for Module where +impl VestingSchedule for Module where BalanceOf: MaybeSerializeDeserialize + Debug { type Moment = T::BlockNumber; @@ -391,11 +391,10 @@ mod tests { use super::*; use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, + assert_ok, assert_noop, impl_outer_origin, parameter_types, }; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup, Identity, BadOrigin}, }; @@ -409,12 +408,14 @@ mod tests { pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -426,13 +427,6 @@ mod tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -443,7 +437,7 @@ mod tests { parameter_types! { pub const MaxLocks: u32 = 10; } - impl pallet_balances::Trait for Test { + impl pallet_balances::Config for Test { type Balance = u64; type DustRemoval = (); type Event = (); @@ -456,7 +450,7 @@ mod tests { pub const MinVestedTransfer: u64 = 256 * 2; pub static ExistentialDeposit: u64 = 0; } - impl Trait for Test { + impl Config for Test { type Event = (); type Currency = Balances; type BlockNumberToBalance = Identity; diff --git a/frame/vesting/src/weights.rs b/frame/vesting/src/weights.rs index 23a46ec763d89810c2b5af4b9dfd1bd5d97db3fb..3d2d6dd9670eb92b2b1da1204477708606842761 100644 --- a/frame/vesting/src/weights.rs +++ b/frame/vesting/src/weights.rs @@ -54,7 +54,7 @@ pub trait WeightInfo { /// Weights for pallet_vesting using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { +impl WeightInfo for SubstrateWeight { fn vest_locked(l: u32, ) -> Weight { (57_472_000 as Weight) .saturating_add((155_000 as Weight).saturating_mul(l as Weight)) diff --git a/primitives/allocator/Cargo.toml b/primitives/allocator/Cargo.toml index 93991a4aeb2ab5969944d41a4a0d0093e48b977e..130723730c4edaa1d264a96afb33f73676fd1f16 100644 --- a/primitives/allocator/Cargo.toml +++ b/primitives/allocator/Cargo.toml @@ -17,8 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] sp-std = { version = "2.0.0", path = "../std", default-features = false } sp-core = { version = "2.0.0", path = "../core", default-features = false } sp-wasm-interface = { version = "2.0.0", path = "../wasm-interface", default-features = false } -log = { version = "0.4.8", optional = true } -derive_more = { version = "0.99.2", optional = true } +log = { version = "0.4.11", optional = true } +thiserror = { version = "1.0.21", optional = true } [features] default = [ "std" ] @@ -27,5 +27,5 @@ std = [ "sp-core/std", "sp-wasm-interface/std", "log", - "derive_more", + "thiserror", ] diff --git a/primitives/allocator/src/error.rs b/primitives/allocator/src/error.rs index 7b634af4d5b295e4fd0cc3691778186319bcde75..77c911cef9d59a20aeb84f59536d1604f8c08b38 100644 --- a/primitives/allocator/src/error.rs +++ b/primitives/allocator/src/error.rs @@ -17,23 +17,15 @@ /// The error type used by the allocators. #[derive(sp_core::RuntimeDebug)] -#[cfg_attr(feature = "std", derive(derive_more::Display))] +#[cfg_attr(feature = "std", derive(thiserror::Error))] pub enum Error { /// Someone tried to allocate more memory than the allowed maximum per allocation. - #[cfg_attr(feature = "std", display(fmt="Requested allocation size is too large"))] + #[cfg_attr(feature = "std", error("Requested allocation size is too large"))] RequestedAllocationTooLarge, /// Allocator run out of space. - #[cfg_attr(feature = "std", display(fmt="Allocator ran out of space"))] + #[cfg_attr(feature = "std", error("Allocator ran out of space"))] AllocatorOutOfSpace, /// Some other error occurred. 
+ #[cfg_attr(feature = "std", error("Other: {0}"))] Other(&'static str) } - -#[cfg(feature = "std")] -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - _ => None, - } - } -} diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index a3c480e92135f3b3347f1726f396c3a7cde8a8eb..92bf9bea2bdc7c250e6032278b8a81b2c503f7c9 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -21,6 +21,7 @@ sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" sp-version = { version = "2.0.0", default-features = false, path = "../version" } sp-state-machine = { version = "0.8.0", optional = true, path = "../../primitives/state-machine" } hash-db = { version = "0.15.2", optional = true } +thiserror = { version = "1.0.21", optional = true } [dev-dependencies] sp-test-primitives = { version = "2.0.0", path = "../test-primitives" } @@ -35,4 +36,5 @@ std = [ "sp-state-machine", "sp-version/std", "hash-db", + "thiserror", ] diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index a628ade6f9b476a2b2604b456c215a8545b2a27d..aebefe7ea03a4fdd2675d8e240d2dc56106c23e4 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -708,13 +708,7 @@ impl<'a> ToClientSideDecl<'a> { }, #crate_::NativeOrEncoded::Encoded(r) => { <#ret_type as #crate_::Decode>::decode(&mut &r[..]) - .map_err(|err| - format!( - "Failed to decode result of `{}`: {}", - #function_name, - err.what(), - ).into() - ) + .map_err(|err| { #crate_::ApiError::new(#function_name, err).into() }) } } ) diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 3e2fd42951b3c56b27ce86409517c27dc327947f..14cf47fc64b25e8d78361e94e94e6f4af70bf630 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -69,7 +69,9 @@ fn implement_common_api_traits( ) -> Result { let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - let error_type = error_type.map(|e| quote!(#e)).unwrap_or_else(|| quote!(String)); + let error_type = error_type + .map(|e| quote!(#e)) + .unwrap_or_else(|| quote!( #crate_::ApiError ) ); // Quote using the span from `error_type` to generate nice error messages when the type is // not implementing a trait or similar. diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 9dadce3b55452c36d35464fb8904feac79fa9a81..96da63cf2e253eed70b3cf3075cc89777ca588bf 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -74,6 +74,7 @@ use sp_core::OpaqueMetadata; #[cfg(feature = "std")] use std::{panic::UnwindSafe, cell::RefCell}; + /// Maximum nesting level for extrinsics. pub const MAX_EXTRINSIC_DEPTH: u32 = 256; @@ -288,7 +289,7 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// /// Sets the error type that is being used by the mock implementation. /// /// The error type is used by all runtime apis. It is only required to /// /// be specified in one trait implementation. 
-/// type Error = String; +/// type Error = sp_api::ApiError; /// /// fn build_block() -> Block { /// unimplemented!("Not Required in tests") @@ -315,6 +316,7 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// # use sp_runtime::{traits::Block as BlockT, generic::BlockId}; /// # use sp_test_primitives::Block; /// # use sp_core::NativeOrEncoded; +/// # use codec; /// # /// # sp_api::decl_runtime_apis! { /// # /// Declare the api trait. @@ -331,15 +333,15 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// /// sp_api::mock_impl_runtime_apis! { /// impl Balance for MockApi { -/// type Error = String; +/// type Error = sp_api::ApiError; /// #[advanced] -/// fn get_balance(&self, at: &BlockId) -> Result, String> { +/// fn get_balance(&self, at: &BlockId) -> Result, Self::Error> { /// println!("Being called at: {}", at); /// /// Ok(self.balance.into()) /// } /// #[advanced] -/// fn set_balance(at: &BlockId, val: u64) -> Result, String> { +/// fn set_balance(at: &BlockId, val: u64) -> Result, Self::Error> { /// if let BlockId::Number(1) = at { /// println!("Being called to set balance to: {}", val); /// } @@ -392,12 +394,42 @@ pub trait ConstructRuntimeApi> { fn construct_runtime_api<'a>(call: &'a C) -> ApiRef<'a, Self::RuntimeApi>; } +/// An error describing which API call failed. +#[cfg_attr(feature = "std", derive(Debug, thiserror::Error, Eq, PartialEq))] +#[cfg_attr(feature = "std", error("Failed to execute API call {tag}"))] +#[cfg(feature = "std")] +pub struct ApiError { + tag: &'static str, + #[source] + error: codec::Error, +} + +#[cfg(feature = "std")] +impl From<(&'static str, codec::Error)> for ApiError { + fn from((tag, error): (&'static str, codec::Error)) -> Self { + Self { + tag, + error, + } + } +} + +#[cfg(feature = "std")] +impl ApiError { + pub fn new(tag: &'static str, error: codec::Error) -> Self { + Self { + tag, + error, + } + } +} + /// Extends the runtime api traits with an associated error type. This trait is given as super /// trait to every runtime api trait. #[cfg(feature = "std")] pub trait ApiErrorExt { /// Error type used by the runtime apis. - type Error: std::fmt::Debug + From; + type Error: std::fmt::Debug + From; } /// Extends the runtime api implementation with some common functionality. @@ -506,7 +538,7 @@ pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend { /// Error type used by the implementation. - type Error: std::fmt::Debug + From; + type Error: std::fmt::Debug + From; /// The state backend that is used to store the block states. 
type StateBackend: StateBackend>; diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index 867cdd6e57e48f6b63f7739be91c4fb49db759ea..1110b02020b3e7f008529717048e67476a9c2adf 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -21,7 +21,7 @@ sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common sc-block-builder = { version = "0.8.0", path = "../../../client/block-builder" } codec = { package = "parity-scale-codec", version = "1.3.1" } sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } -trybuild = "1.0.17" +trybuild = { git = "https://github.com/bkchr/trybuild.git", branch = "bkchr-use-workspace-cargo-lock" } rustversion = "1.0.0" [dev-dependencies] diff --git a/primitives/api/test/tests/decl_and_impl.rs b/primitives/api/test/tests/decl_and_impl.rs index 594882baf1e34f9b63e8344b53594e18da39a043..be549d7b7f4cd93f7fe300558821618e407c7733 100644 --- a/primitives/api/test/tests/decl_and_impl.rs +++ b/primitives/api/test/tests/decl_and_impl.rs @@ -17,6 +17,7 @@ use sp_api::{ RuntimeApiInfo, decl_runtime_apis, impl_runtime_apis, mock_impl_runtime_apis, + ApiError, ApiExt, }; use sp_runtime::{traits::{GetNodeBlockType, Block as BlockT}, generic::BlockId}; @@ -103,17 +104,27 @@ mock_impl_runtime_apis! { } #[advanced] - fn same_name(_: &BlockId) -> std::result::Result, String> { + fn same_name(_: &BlockId) -> + std::result::Result< + NativeOrEncoded<()>, + ApiError + > + { Ok(().into()) } #[advanced] - fn wild_card(at: &BlockId, _: u32) -> std::result::Result, String> { + fn wild_card(at: &BlockId, _: u32) -> + std::result::Result< + NativeOrEncoded<()>, + ApiError + > + { if let BlockId::Number(1337) = at { // yeah Ok(().into()) } else { - Err("Ohh noooo".into()) + Err(ApiError::new("MockApi", codec::Error::from("Ohh noooo"))) } } } @@ -197,5 +208,8 @@ fn mock_runtime_api_works_with_advanced() { Api::::same_name(&mock, &BlockId::Number(0)).unwrap(); mock.wild_card(&BlockId::Number(1337), 1).unwrap(); - assert_eq!(String::from("Ohh noooo"), mock.wild_card(&BlockId::Number(1336), 1).unwrap_err()); + assert_eq!( + ApiError::new("MockApi", ::codec::Error::from("Ohh noooo")), + mock.wild_card(&BlockId::Number(1336), 1).unwrap_err() + ); } diff --git a/primitives/api/test/tests/trybuild.rs b/primitives/api/test/tests/trybuild.rs index 2f7fd6d06bcd34c3724d6fa39422d4f7890392cc..f23c7291e8ef7ad11229baa4b277bc2f12fdf06c 100644 --- a/primitives/api/test/tests/trybuild.rs +++ b/primitives/api/test/tests/trybuild.rs @@ -21,7 +21,7 @@ use std::env; #[test] fn ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. - env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/ui/*.rs"); diff --git a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs index 1e71730cd0a17ae5745ad351c94062081e8b6b40..fd654ffdc63d666f33d429b154444dd8f7d2e67c 100644 --- a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs @@ -1,4 +1,5 @@ use substrate_test_runtime_client::runtime::Block; +use sp_api::ApiError; sp_api::decl_runtime_apis! { pub trait Api { @@ -11,7 +12,7 @@ struct MockApi; sp_api::mock_impl_runtime_apis! 
{ impl Api for MockApi { #[advanced] - fn test(&self, _: BlockId) -> Result, String> { + fn test(&self, _: BlockId) -> Result, ApiError> { Ok(().into()) } } diff --git a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr index efddce05f51b4a24038b97ab07cf81ce5bde0407..47cd9e01d910f32be33fce5ae7dcf12aa9f7d890 100644 --- a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr @@ -1,13 +1,13 @@ error: `BlockId` needs to be taken by reference and not by value! - --> $DIR/mock_advanced_block_id_by_value.rs:11:1 + --> $DIR/mock_advanced_block_id_by_value.rs:12:1 | -11 | / sp_api::mock_impl_runtime_apis! { -12 | | impl Api for MockApi { -13 | | #[advanced] -14 | | fn test(&self, _: BlockId) -> Result, String> { +12 | / sp_api::mock_impl_runtime_apis! { +13 | | impl Api for MockApi { +14 | | #[advanced] +15 | | fn test(&self, _: BlockId) -> Result, ApiError> { ... | -17 | | } -18 | | } +18 | | } +19 | | } | |_^ | = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs index 407ea90ee882d3a898caa9def94a809efb6df80c..a15ef133fa6c457c46ad1c4386078abfb48a50ab 100644 --- a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs +++ b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs @@ -1,4 +1,5 @@ use substrate_test_runtime_client::runtime::Block; +use sp_api::ApiError; sp_api::decl_runtime_apis! { pub trait Api { @@ -11,7 +12,7 @@ struct MockApi; sp_api::mock_impl_runtime_apis! { impl Api for MockApi { #[advanced] - fn test(&self) -> Result, String> { + fn test(&self) -> Result, ApiError> { Ok(().into()) } } diff --git a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr index e7a66ebc5dba8097a0f6c740e3b6379e77a9a4e7..87d3660316b1e1f9e40c3721fd17b9c3ca2105d5 100644 --- a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr +++ b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr @@ -1,5 +1,5 @@ error: If using the `advanced` attribute, it is required that the function takes at least one argument, the `BlockId`. - --> $DIR/mock_advanced_missing_blockid.rs:14:3 + --> $DIR/mock_advanced_missing_blockid.rs:15:3 | -14 | fn test(&self) -> Result, String> { +15 | fn test(&self) -> Result, ApiError> { | ^^ diff --git a/primitives/api/test/tests/ui/mock_only_one_error_type.stderr b/primitives/api/test/tests/ui/mock_only_one_error_type.stderr index daac5674d6ffe8aafa57d29cf17071886d0fca08..82fd04e8c5e040e7d97d46968b1d60a78de9bcdd 100644 --- a/primitives/api/test/tests/ui/mock_only_one_error_type.stderr +++ b/primitives/api/test/tests/ui/mock_only_one_error_type.stderr @@ -10,16 +10,16 @@ error: First error type was declared here. 
17 | type Error = u32; | ^^^ -error[E0277]: the trait bound `u32: std::convert::From` is not satisfied +error[E0277]: the trait bound `u32: std::convert::From` is not satisfied --> $DIR/mock_only_one_error_type.rs:17:16 | 17 | type Error = u32; - | ^^^ the trait `std::convert::From` is not implemented for `u32` + | ^^^ the trait `std::convert::From` is not implemented for `u32` | ::: $WORKSPACE/primitives/api/src/lib.rs | - | type Error: std::fmt::Debug + From; - | ------------ required by this bound in `sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ApiErrorExt` + | type Error: std::fmt::Debug + From; + | -------------- required by this bound in `sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ApiErrorExt` | = help: the following implementations were found: > diff --git a/primitives/application-crypto/test/Cargo.toml b/primitives/application-crypto/test/Cargo.toml index 0e7fdc7559ca1733dd0658244065692a3d9a562b..f132e04deaa08fb204d35528cdc47a337e3e3738 100644 --- a/primitives/application-crypto/test/Cargo.toml +++ b/primitives/application-crypto/test/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-core = { version = "2.0.0", default-features = false, path = "../../core" } -sp-keystore = { version = "0.8.0", path = "../../keystore" } +sp-keystore = { version = "0.8.0", path = "../../keystore", default-features = false } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } sp-runtime = { version = "2.0.0", path = "../../runtime" } sp-api = { version = "2.0.0", path = "../../api" } diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index 035a704ba3009dfe622f812e2271b6906f7c88c3..59d98eea2b780388defbc8d0c87692e226889262 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -61,13 +61,11 @@ pub trait PerThing: fn is_one(&self) -> bool { self.deconstruct() == Self::ACCURACY } /// Build this type from a percent. Equivalent to `Self::from_parts(x * Self::ACCURACY / 100)` - /// but more accurate. + /// but more accurate and can cope with potential type overflows. fn from_percent(x: Self::Inner) -> Self { - let a = x.min(100.into()); - let b = Self::ACCURACY; - // if Self::ACCURACY % 100 > 0 then we need the correction for accuracy - let c = rational_mul_correction::(b, a, 100.into(), Rounding::Nearest); - Self::from_parts(a / 100.into() * b + c) + let a: Self::Inner = x.min(100.into()); + let b: Self::Inner = 100.into(); + Self::from_rational_approximation(a, b) } /// Return the product of multiplication of this value by itself. @@ -334,7 +332,7 @@ macro_rules! implement_per_thing { &self.0 } fn decode_from(x: Self::As) -> Self { - // Saturates if `x` is more than `$max` internally. + // Saturates if `x` is more than `$max` internally. Self::from_parts(x) } } @@ -707,6 +705,7 @@ macro_rules! implement_per_thing { assert_eq!($name::from_percent(0), $name::from_parts(Zero::zero())); assert_eq!($name::from_percent(10), $name::from_parts($max / 10)); + assert_eq!($name::from_percent(50), $name::from_parts($max / 2)); assert_eq!($name::from_percent(100), $name::from_parts($max)); assert_eq!($name::from_percent(200), $name::from_parts($max)); @@ -717,6 +716,15 @@ macro_rules! 
implement_per_thing { assert_eq!($name::from_fraction(-1.0), $name::from_parts(Zero::zero())); } + #[test] + fn percent_trait_impl_works() { + assert_eq!(<$name as PerThing>::from_percent(0), $name::from_parts(Zero::zero())); + assert_eq!(<$name as PerThing>::from_percent(10), $name::from_parts($max / 10)); + assert_eq!(<$name as PerThing>::from_percent(50), $name::from_parts($max / 2)); + assert_eq!(<$name as PerThing>::from_percent(100), $name::from_parts($max)); + assert_eq!(<$name as PerThing>::from_percent(200), $name::from_parts($max)); + } + macro_rules! u256ify { ($val:expr) => { Into::::into($val) diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index f714aaaa1dae1c3f37e2cdae03e203daf2d2fa00..3458b8c0846ba4810e24ef3362961936e38ac407 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -15,12 +15,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.11" -lru = "0.4.0" +lru = "0.6.1" parking_lot = "0.10.0" thiserror = "1.0.21" +futures = "0.3" codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } sp-consensus = { version = "0.8.0", path = "../consensus/common" } sp-runtime = { version = "2.0.0", path = "../runtime" } -sp-block-builder = { version = "2.0.0", path = "../block-builder" } sp-state-machine = { version = "0.8.0", path = "../state-machine" } sp-database = { version = "2.0.0", path = "../database" } +sp-api = { version = "2.0.0", path = "../api" } diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index 1328dfb5752fc0544e33c6b9934965dd3158dded..01a7a59d6f94fc636ce938c3866c9c8e5ae950f2 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -53,7 +53,7 @@ pub trait HeaderBackend: Send + Sync { /// Convert an arbitrary block ID into a block hash. fn block_number_from_id(&self, id: &BlockId) -> Result>> { match *id { - BlockId::Hash(_) => Ok(self.header(*id)?.map(|h| h.number().clone())), + BlockId::Hash(h) => self.number(h), BlockId::Number(n) => Ok(Some(n)), } } @@ -172,7 +172,7 @@ pub trait Backend: HeaderBackend + HeaderMetadata: HeaderBackend + HeaderMetadata = result::Result; /// Error when the runtime failed to apply an extrinsic. #[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] pub enum ApplyExtrinsicFailed { /// The transaction cannot be included into the current block. /// @@ -35,114 +37,142 @@ pub enum ApplyExtrinsicFailed { /// unappliable onto the current block. #[error("Extrinsic is not valid: {0:?}")] Validity(#[from] TransactionValidityError), - /// This is used for miscellaneous errors that can be represented by string and not handleable. - /// - /// This will become obsolete with complete migration to v4 APIs. - #[error("Extrinsic failed: {0}")] - Msg(String), + + #[error("Application specific error")] + Application(#[source] Box), } /// Substrate Client error #[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +#[non_exhaustive] pub enum Error { - /// Consensus Error + #[error("Cancelled oneshot channel {0}")] + OneShotCancelled(#[from] futures::channel::oneshot::Canceled), + #[error(transparent)] Consensus(#[from] sp_consensus::Error), - /// Backend error. + #[error("Backend error: {0}")] Backend(String), - /// Unknown block. + #[error("UnknownBlock: {0}")] UnknownBlock(String), - /// The `apply_extrinsic` is not valid due to the given `TransactionValidityError`. 
+ #[error(transparent)] ApplyExtrinsicFailed(#[from] ApplyExtrinsicFailed), - /// Execution error. + + #[error("Child type is invalid")] + InvalidChildType, + + #[error("RemoteBodyRequest: invalid extrinsics root expected: {expected} but got {received}")] + ExtrinsicRootInvalid { received: String, expected: String }, + + // `inner` cannot be made member, since it lacks `std::error::Error` trait bounds. #[error("Execution failed: {0:?}")] Execution(Box), - /// Blockchain error. + #[error("Blockchain")] Blockchain(#[source] Box), - /// Invalid authorities set received from the runtime. + + /// A error used by various storage subsystems. + /// + /// Eventually this will be replaced. + #[error("{0}")] + StorageChanges(sp_state_machine::DefaultError), + + #[error("Invalid child storage key")] + InvalidChildStorageKey, + #[error("Current state of blockchain has invalid authorities set")] InvalidAuthoritiesSet, - /// Could not get runtime version. + #[error("Failed to get runtime version: {0}")] VersionInvalid(String), - /// Genesis config is invalid. + #[error("Genesis config provided is invalid")] GenesisInvalid, - /// Error decoding header justification. + #[error("error decoding justification for header")] JustificationDecode, - /// Justification for header is correctly encoded, but invalid. + #[error("bad justification for header: {0}")] BadJustification(String), - /// Not available on light client. + #[error("This method is not currently available when running in light client mode")] NotAvailableOnLightClient, - /// Invalid remote CHT-based proof. + #[error("Remote node has responded with invalid header proof")] InvalidCHTProof, - /// Remote fetch has been cancelled. + #[error("Remote data fetch has been cancelled")] RemoteFetchCancelled, - /// Remote fetch has been failed. + #[error("Remote data fetch has been failed")] RemoteFetchFailed, - /// Error decoding call result. + #[error("Error decoding call result of {0}")] CallResultDecode(&'static str, #[source] CodecError), - /// Error converting a parameter between runtime and node. - #[error("Error converting `{0}` between runtime and node")] - RuntimeParamConversion(String), - /// Changes tries are not supported. + + #[error(transparent)] + RuntimeApiCodecError(#[from] ApiError), + + #[error("Runtime :code missing in storage")] + RuntimeCodeMissing, + #[error("Changes tries are not supported by the runtime")] ChangesTriesNotSupported, - /// Error reading changes tries configuration. + #[error("Error reading changes tries configuration")] ErrorReadingChangesTriesConfig, - /// Key changes query has failed. + #[error("Failed to check changes proof: {0}")] ChangesTrieAccessFailed(String), - /// Last finalized block not parent of current. + #[error("Did not finalize blocks in sequential order.")] NonSequentialFinalization(String), - /// Safety violation: new best block not descendent of last finalized. + #[error("Potential long-range attack: block not in finalized chain.")] NotInFinalizedChain, - /// Hash that is required for building CHT is missing. + #[error("Failed to get hash of block for building CHT")] MissingHashRequiredForCHT, - /// Invalid calculated state root on block import. + #[error("Calculated state root does not match.")] InvalidStateRoot, - /// Incomplete block import pipeline. 
+ #[error("Incomplete block import pipeline.")] IncompletePipeline, + #[error("Transaction pool not ready for block production.")] TransactionPoolNotReady, + #[error("Database")] DatabaseError(#[from] sp_database::error::DatabaseError), - /// A convenience variant for String - #[error("{0}")] - Msg(String), -} -impl<'a> From<&'a str> for Error { - fn from(s: &'a str) -> Self { - Error::Msg(s.into()) - } -} + #[error("Failed to get header for hash {0}")] + MissingHeader(String), -impl From for Error { - fn from(s: String) -> Self { - Error::Msg(s) - } + + #[error("State Database error: {0}")] + StateDatabase(String), + + #[error(transparent)] + Application(#[from] Box), + + // Should be removed/improved once + // the storage `fn`s returns typed errors. + #[error("Runtime code error: {0}")] + RuntimeCode(&'static str), + + // Should be removed/improved once + // the storage `fn`s returns typed errors. + #[error("Storage error: {0}")] + Storage(String), } -impl From> for Error { - fn from(e: Box) -> Self { +impl From> for Error { + fn from(e: Box) -> Self { Self::from_state(e) } } @@ -163,4 +193,11 @@ impl Error { pub fn from_state(e: Box) -> Self { Error::Execution(e) } + + /// Construct from a state db error. + // Can not be done directly, since that would make cargo run out of stack if + // `sc-state-db` is lib is added as dependency. + pub fn from_state_db(e: E) -> Self where E: std::fmt::Debug { + Error::StateDatabase(format!("{:?}", e)) + } } diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 001f0e36794586bab35befa88f46e7eb6fc5e6a0..dc1550ed2953ca89fd4a65d06677112de39c2a90 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] thiserror = "1.0.21" -libp2p = { version = "0.30.1", default-features = false } +libp2p = { version = "0.32.2", default-features = false } log = "0.4.8" sp-core = { path= "../../core", version = "2.0.0"} sp-inherents = { version = "2.0.0", path = "../../inherents" } @@ -33,7 +33,7 @@ codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive parking_lot = "0.10.0" serde = { version = "1.0", features = ["derive"] } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0"} -wasm-timer = "0.2.4" +wasm-timer = "0.2.5" [dev-dependencies] futures = "0.3.4" diff --git a/primitives/consensus/common/src/block_import.rs b/primitives/consensus/common/src/block_import.rs index 5e593da1163d77a65be2ca7bfaa9cd9f4114d71b..0100041fc0a0ccdd8d9c765ba554c0302814fc23 100644 --- a/primitives/consensus/common/src/block_import.rs +++ b/primitives/consensus/common/src/block_import.rs @@ -26,7 +26,7 @@ use std::sync::Arc; use std::any::Any; use crate::Error; -use crate::import_queue::{Verifier, CacheKeyId}; +use crate::import_queue::CacheKeyId; /// Block import result. #[derive(Debug, PartialEq, Eq)] @@ -54,8 +54,6 @@ pub struct ImportedAux { pub needs_justification: bool, /// Received a bad justification. pub bad_justification: bool, - /// Request a finality proof for the given block. - pub needs_finality_proof: bool, /// Whether the block that was imported is the new best block. 
pub is_new_best: bool, } @@ -63,7 +61,7 @@ pub struct ImportedAux { impl ImportResult { /// Returns default value for `ImportResult::Imported` with /// `clear_justification_requests`, `needs_justification`, - /// `bad_justification` and `needs_finality_proof` set to false. + /// `bad_justification` set to false. pub fn imported(is_new_best: bool) -> ImportResult { let mut aux = ImportedAux::default(); aux.is_new_best = is_new_best; @@ -345,21 +343,3 @@ pub trait JustificationImport { justification: Justification, ) -> Result<(), Self::Error>; } - -/// Finality proof import trait. -pub trait FinalityProofImport { - type Error: std::error::Error + Send + 'static; - - /// Called by the import queue when it is started. Returns a list of finality proofs to request - /// from the network. - fn on_start(&mut self) -> Vec<(B::Hash, NumberFor)> { Vec::new() } - - /// Import a Block justification and finalize the given block. Returns finalized block or error. - fn import_finality_proof( - &mut self, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, - ) -> Result<(B::Hash, NumberFor), Self::Error>; -} diff --git a/primitives/consensus/common/src/block_validation.rs b/primitives/consensus/common/src/block_validation.rs index f8255130e64160617066ec84f1991ff0e5838f02..b926144159574dbd5409fb62cad4d1decb1abda5 100644 --- a/primitives/consensus/common/src/block_validation.rs +++ b/primitives/consensus/common/src/block_validation.rs @@ -42,7 +42,12 @@ pub enum Validation { is_new_best: bool, }, /// Invalid block announcement. - Failure, + Failure { + /// Should we disconnect from this peer? + /// + /// This should be used if the peer for example send junk to spam us. + disconnect: bool, + }, } /// Type which checks incoming block announcements. @@ -68,8 +73,20 @@ impl BlockAnnounceValidator for DefaultBlockAnnounceValidator { fn validate( &mut self, _: &B::Header, - _: &[u8], + data: &[u8], ) -> Pin>> + Send>> { - async { Ok(Validation::Success { is_new_best: false }) }.boxed() + let is_empty = data.is_empty(); + + async move { + if !is_empty { + log::debug!( + target: "sync", + "Received unknown data alongside the block announcement.", + ); + Ok(Validation::Failure { disconnect: true }) + } else { + Ok(Validation::Success { is_new_best: false }) + } + }.boxed() } } diff --git a/primitives/consensus/common/src/error.rs b/primitives/consensus/common/src/error.rs index a21bcf6cca9b23c8367681317c9e246d6a42d88a..11b24d273d5ecbca5b2ccdc66fd08a0838a083f6 100644 --- a/primitives/consensus/common/src/error.rs +++ b/primitives/consensus/common/src/error.rs @@ -25,6 +25,7 @@ pub type Result = std::result::Result; /// Error type. #[derive(Debug, thiserror::Error)] +#[non_exhaustive] pub enum Error { /// Missing state at block with given descriptor. #[error("State unavailable at block {0}")] diff --git a/primitives/consensus/common/src/import_queue.rs b/primitives/consensus/common/src/import_queue.rs index 92bd9966d75ec031778f71a638093c555a3f647c..b32ca0133d9954f539313d54b7b0186ec17f9dde 100644 --- a/primitives/consensus/common/src/import_queue.rs +++ b/primitives/consensus/common/src/import_queue.rs @@ -34,7 +34,7 @@ use crate::{ error::Error as ConsensusError, block_import::{ BlockImport, BlockOrigin, BlockImportParams, ImportedAux, JustificationImport, ImportResult, - BlockCheckParams, FinalityProofImport, + BlockCheckParams, }, metrics::Metrics, }; @@ -56,11 +56,6 @@ pub type BoxBlockImport = Box< /// Shared justification import struct used by the queue. 
pub type BoxJustificationImport = Box + Send + Sync>; -/// Shared finality proof import struct used by the queue. -pub type BoxFinalityProofImport = Box< - dyn FinalityProofImport + Send + Sync ->; - /// Maps to the Origin used by the network. pub type Origin = libp2p::PeerId; @@ -115,15 +110,6 @@ pub trait ImportQueue: Send { number: NumberFor, justification: Justification ); - /// Import block finality proof. - fn import_finality_proof( - &mut self, - who: Origin, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec - ); - /// Polls for actions to perform on the network. /// /// This method should behave in a way similar to `Future::poll`. It can register the current @@ -146,24 +132,11 @@ pub trait Link: Send { fn justification_imported(&mut self, _who: Origin, _hash: &B::Hash, _number: NumberFor, _success: bool) {} /// Request a justification for the given block. fn request_justification(&mut self, _hash: &B::Hash, _number: NumberFor) {} - /// Finality proof import result. - /// - /// Even though we have asked for finality proof of block A, provider could return proof of - /// some earlier block B, if the proof for A was too large. The sync module should continue - /// asking for proof of A in this case. - fn finality_proof_imported( - &mut self, - _who: Origin, - _request_block: (B::Hash, NumberFor), - _finalization_result: Result<(B::Hash, NumberFor), ()>, - ) {} - /// Request a finality proof for the given block. - fn request_finality_proof(&mut self, _hash: &B::Hash, _number: NumberFor) {} } /// Block import successful result. #[derive(Debug, PartialEq)] -pub enum BlockImportResult { +pub enum BlockImportResult { /// Imported known block. ImportedKnown(N), /// Imported unknown block. diff --git a/primitives/consensus/common/src/import_queue/basic_queue.rs b/primitives/consensus/common/src/import_queue/basic_queue.rs index ea0ca2cf3ee8848f9cfa9c12b02da5d966aac484..b426c39100e697d3def9d36c115bf7e794f08e47 100644 --- a/primitives/consensus/common/src/import_queue/basic_queue.rs +++ b/primitives/consensus/common/src/import_queue/basic_queue.rs @@ -25,7 +25,7 @@ use prometheus_endpoint::Registry; use crate::{ block_import::BlockOrigin, import_queue::{ - BlockImportResult, BlockImportError, Verifier, BoxBlockImport, BoxFinalityProofImport, + BlockImportResult, BlockImportError, Verifier, BoxBlockImport, BoxJustificationImport, ImportQueue, Link, Origin, IncomingBlock, import_single_block_metered, buffered_link::{self, BufferedLinkSender, BufferedLinkReceiver}, @@ -36,8 +36,8 @@ use crate::{ /// Interface to a basic block import queue that is importing blocks sequentially in a separate /// task, with plugable verification. pub struct BasicQueue { - /// Channel to send finality work messages to the background task. - finality_sender: TracingUnboundedSender>, + /// Channel to send justifcation import messages to the background task. + justification_sender: TracingUnboundedSender>, /// Channel to send block import messages to the background task. block_import_sender: TracingUnboundedSender>, /// Results coming from the worker task. @@ -48,7 +48,7 @@ pub struct BasicQueue { impl Drop for BasicQueue { fn drop(&mut self) { // Flush the queue and close the receiver to terminate the future. - self.finality_sender.close_channel(); + self.justification_sender.close_channel(); self.block_import_sender.close_channel(); self.result_port.close(); } @@ -57,13 +57,11 @@ impl Drop for BasicQueue { impl BasicQueue { /// Instantiate a new basic queue, with given verifier. 
/// - /// This creates a background task, and calls `on_start` on the justification importer and - /// finality proof importer. + /// This creates a background task, and calls `on_start` on the justification importer. pub fn new>( verifier: V, block_import: BoxBlockImport, justification_import: Option>, - finality_proof_import: Option>, spawner: &impl sp_core::traits::SpawnNamed, prometheus_registry: Option<&Registry>, ) -> Self { @@ -77,19 +75,18 @@ impl BasicQueue { .ok() }); - let (future, finality_sender, block_import_sender) = BlockImportWorker::new( + let (future, justification_sender, block_import_sender) = BlockImportWorker::new( result_sender, verifier, block_import, justification_import, - finality_proof_import, metrics, ); spawner.spawn_blocking("basic-block-import-worker", future.boxed()); Self { - finality_sender, + justification_sender, block_import_sender, result_port, _phantom: PhantomData, @@ -122,8 +119,8 @@ impl ImportQueue for BasicQueue number: NumberFor, justification: Justification, ) { - let res = self.finality_sender.unbounded_send( - worker_messages::Finality::ImportJustification(who, hash, number, justification), + let res = self.justification_sender.unbounded_send( + worker_messages::ImportJustification(who, hash, number, justification), ); if res.is_err() { @@ -134,26 +131,6 @@ impl ImportQueue for BasicQueue } } - fn import_finality_proof( - &mut self, - who: Origin, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec, - ) { - trace!(target: "sync", "Scheduling finality proof of {}/{} for import", number, hash); - let res = self.finality_sender.unbounded_send( - worker_messages::Finality::ImportFinalityProof(who, hash, number, finality_proof), - ); - - if res.is_err() { - log::error!( - target: "sync", - "import_finality_proof: Background import task is no longer alive" - ); - } - } - fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link) { if self.result_port.poll_actions(cx, link).is_err() { log::error!(target: "sync", "poll_actions: Background import task is no longer alive"); @@ -166,17 +143,12 @@ mod worker_messages { use super::*; pub struct ImportBlocks(pub BlockOrigin, pub Vec>); - - pub enum Finality { - ImportJustification(Origin, B::Hash, NumberFor, Justification), - ImportFinalityProof(Origin, B::Hash, NumberFor, Vec), - } + pub struct ImportJustification(pub Origin, pub B::Hash, pub NumberFor, pub Justification); } struct BlockImportWorker { result_sender: BufferedLinkSender, justification_import: Option>, - finality_proof_import: Option>, delay_between_blocks: Duration, metrics: Option, _phantom: PhantomData, @@ -188,17 +160,16 @@ impl BlockImportWorker { verifier: V, block_import: BoxBlockImport, justification_import: Option>, - finality_proof_import: Option>, metrics: Option, ) -> ( impl Future + Send, - TracingUnboundedSender>, + TracingUnboundedSender>, TracingUnboundedSender>, ) { use worker_messages::*; - let (finality_sender, mut finality_port) = - tracing_unbounded("mpsc_import_queue_worker_finality"); + let (justification_sender, mut justification_port) = + tracing_unbounded("mpsc_import_queue_worker_justification"); let (block_import_sender, mut block_import_port) = tracing_unbounded("mpsc_import_queue_worker_blocks"); @@ -206,23 +177,17 @@ impl BlockImportWorker { let mut worker = BlockImportWorker { result_sender, justification_import, - finality_proof_import, delay_between_blocks: Duration::new(0, 0), metrics, _phantom: PhantomData, }; - // Let's initialize `justification_import` and `finality_proof_import`. 
+ // Let's initialize `justification_import` if let Some(justification_import) = worker.justification_import.as_mut() { for (hash, number) in justification_import.on_start() { worker.result_sender.request_justification(&hash, number); } } - if let Some(finality_proof_import) = worker.finality_proof_import.as_mut() { - for (hash, number) in finality_proof_import.on_start() { - worker.result_sender.request_finality_proof(&hash, number); - } - } // The future below has two possible states: // @@ -230,7 +195,7 @@ impl BlockImportWorker { // `Future`, and `block_import` is `None`. // - Something else, in which case `block_import` is `Some` and `importing` is None. // - // Additionally, the task will prioritize processing of finality work messages over + // Additionally, the task will prioritize processing of justification import messages over // block import messages, hence why two distinct channels are used. let mut block_import_verifier = Some((block_import, verifier)); let mut importing = None; @@ -243,28 +208,15 @@ impl BlockImportWorker { return Poll::Ready(()) } - // Grab the next finality action request sent to the import queue. - let finality_work = match Stream::poll_next(Pin::new(&mut finality_port), cx) { - Poll::Ready(Some(msg)) => Some(msg), - Poll::Ready(None) => return Poll::Ready(()), - Poll::Pending => None, - }; - - match finality_work { - Some(Finality::ImportFinalityProof(who, hash, number, proof)) => { - let (_, verif) = block_import_verifier - .as_mut() - .expect("block_import_verifier is always Some; qed"); - - worker.import_finality_proof(verif, who, hash, number, proof); - continue; - } - Some(Finality::ImportJustification(who, hash, number, justification)) => { + // Grab the next justification import request sent to the import queue. + match Stream::poll_next(Pin::new(&mut justification_port), cx) { + Poll::Ready(Some(ImportJustification(who, hash, number, justification))) => { worker.import_justification(who, hash, number, justification); continue; - } - None => {} - } + }, + Poll::Ready(None) => return Poll::Ready(()), + Poll::Pending => {}, + }; // If we are in the process of importing a bunch of blocks, let's resume this // process before doing anything more. 
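The worker above keeps two separate channels and, as its own comment states, always services justification-import messages before block-import messages. Below is a minimal, self-contained sketch of that prioritisation pattern only, not the actual `BasicQueue`/`BlockImportWorker` code: `JustificationMsg`, `BlockMsg` and `import_worker` are invented for illustration, and `futures::select_biased!` is used here simply because it polls its arms in declaration order.

use futures::{channel::mpsc, executor::block_on, select_biased, StreamExt};

enum JustificationMsg { Import { block: u64 } }
enum BlockMsg { Import { blocks: Vec<u64> } }

// Poll the justification channel before the block channel: `select_biased!`
// checks its arms in order, so justification work always wins when both
// channels have something pending.
async fn import_worker(
    mut justification_rx: mpsc::UnboundedReceiver<JustificationMsg>,
    mut block_rx: mpsc::UnboundedReceiver<BlockMsg>,
) {
    loop {
        select_biased! {
            msg = justification_rx.next() => {
                if let Some(JustificationMsg::Import { block }) = msg {
                    println!("importing justification for #{}", block);
                }
            },
            msg = block_rx.next() => {
                if let Some(BlockMsg::Import { blocks }) = msg {
                    println!("importing {} block(s)", blocks.len());
                }
            },
            // Both channels closed and drained: shut the worker down.
            complete => break,
        }
    }
}

fn main() {
    let (justification_tx, justification_rx) = mpsc::unbounded();
    let (block_tx, block_rx) = mpsc::unbounded();
    block_tx.unbounded_send(BlockMsg::Import { blocks: vec![2, 3] }).unwrap();
    justification_tx.unbounded_send(JustificationMsg::Import { block: 1 }).unwrap();
    // Close the senders so the worker terminates after draining both queues.
    drop((justification_tx, block_tx));
    // The justification is processed first even though the block batch was queued earlier.
    block_on(import_worker(justification_rx, block_rx));
}

Using two channels rather than one tagged message type is what lets the real worker drain all pending justification work before resuming an in-progress block import batch.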
@@ -299,7 +251,7 @@ impl BlockImportWorker { } }); - (future, finality_sender, block_import_sender) + (future, justification_sender, block_import_sender) } /// Returns a `Future` that imports the given blocks and sends the results on @@ -324,36 +276,6 @@ impl BlockImportWorker { }) } - fn import_finality_proof>( - &mut self, - verifier: &mut V, - who: Origin, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec - ) { - let started = wasm_timer::Instant::now(); - let result = self.finality_proof_import.as_mut().map(|finality_proof_import| { - finality_proof_import.import_finality_proof(hash, number, finality_proof, verifier) - .map_err(|e| { - debug!( - "Finality proof import failed with {:?} for hash: {:?} number: {:?} coming from node: {:?}", - e, - hash, - number, - who, - ); - }) - }).unwrap_or(Err(())); - - if let Some(metrics) = self.metrics.as_ref() { - metrics.finality_proof_import_time.observe(started.elapsed().as_secs_f64()); - } - - trace!(target: "sync", "Imported finality proof for {}/{}", number, hash); - self.result_sender.finality_proof_imported(who, (hash, number), result); - } - fn import_justification( &mut self, who: Origin, @@ -596,7 +518,7 @@ mod tests { let (result_sender, mut result_port) = buffered_link::buffered_link(); let (mut worker, mut finality_sender, mut block_import_sender) = - BlockImportWorker::new(result_sender, (), Box::new(()), Some(Box::new(())), None, None); + BlockImportWorker::new(result_sender, (), Box::new(()), Some(Box::new(())), None); let mut import_block = |n| { let header = Header { @@ -629,7 +551,7 @@ mod tests { let mut import_justification = || { let hash = Hash::random(); - block_on(finality_sender.send(worker_messages::Finality::ImportJustification( + block_on(finality_sender.send(worker_messages::ImportJustification( libp2p::PeerId::random(), hash, 1, diff --git a/primitives/consensus/common/src/import_queue/buffered_link.rs b/primitives/consensus/common/src/import_queue/buffered_link.rs index a37d4c53c260394dc335842d7723ace983d39472..db9bcc8f0ad63ce7e4985353da0d96834524d0e6 100644 --- a/primitives/consensus/common/src/import_queue/buffered_link.rs +++ b/primitives/consensus/common/src/import_queue/buffered_link.rs @@ -81,8 +81,6 @@ enum BlockImportWorkerMsg { BlocksProcessed(usize, usize, Vec<(Result>, BlockImportError>, B::Hash)>), JustificationImported(Origin, B::Hash, NumberFor, bool), RequestJustification(B::Hash, NumberFor), - FinalityProofImported(Origin, (B::Hash, NumberFor), Result<(B::Hash, NumberFor), ()>), - RequestFinalityProof(B::Hash, NumberFor), } impl Link for BufferedLinkSender { @@ -109,20 +107,6 @@ impl Link for BufferedLinkSender { fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { let _ = self.tx.unbounded_send(BlockImportWorkerMsg::RequestJustification(hash.clone(), number)); } - - fn finality_proof_imported( - &mut self, - who: Origin, - request_block: (B::Hash, NumberFor), - finalization_result: Result<(B::Hash, NumberFor), ()>, - ) { - let msg = BlockImportWorkerMsg::FinalityProofImported(who, request_block, finalization_result); - let _ = self.tx.unbounded_send(msg); - } - - fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { - let _ = self.tx.unbounded_send(BlockImportWorkerMsg::RequestFinalityProof(hash.clone(), number)); - } } /// See [`buffered_link`]. 
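The `buffered_link` hunks below drop the finality-proof messages but keep the overall pattern: `Link` callbacks are converted into enum messages, queued by a sender half, and replayed onto a real `Link` when the receiver side is polled. A rough sketch of that pattern follows, with deliberately simplified, made-up types (`Msg`, plain `u64` hashes, `std::sync::mpsc` instead of the tracing-unbounded channel); it is not the sc/sp-consensus API itself.

use std::sync::mpsc;

trait Link {
    fn blocks_processed(&mut self, _imported: usize, _count: usize) {}
    fn justification_imported(&mut self, _hash: u64, _success: bool) {}
    fn request_justification(&mut self, _hash: u64) {}
}

// Only block and justification messages remain once the finality-proof variants are gone.
enum Msg {
    BlocksProcessed(usize, usize),
    JustificationImported(u64, bool),
    RequestJustification(u64),
}

struct BufferedLinkSender { tx: mpsc::Sender<Msg> }

impl Link for BufferedLinkSender {
    fn blocks_processed(&mut self, imported: usize, count: usize) {
        let _ = self.tx.send(Msg::BlocksProcessed(imported, count));
    }
    fn justification_imported(&mut self, hash: u64, success: bool) {
        let _ = self.tx.send(Msg::JustificationImported(hash, success));
    }
    fn request_justification(&mut self, hash: u64) {
        let _ = self.tx.send(Msg::RequestJustification(hash));
    }
}

struct BufferedLinkReceiver { rx: mpsc::Receiver<Msg> }

impl BufferedLinkReceiver {
    /// Drain all queued messages and replay them on the real `Link`.
    fn poll_actions(&mut self, link: &mut dyn Link) {
        while let Ok(msg) = self.rx.try_recv() {
            match msg {
                Msg::BlocksProcessed(i, c) => link.blocks_processed(i, c),
                Msg::JustificationImported(h, s) => link.justification_imported(h, s),
                Msg::RequestJustification(h) => link.request_justification(h),
            }
        }
    }
}

fn main() {
    struct PrintLink;
    impl Link for PrintLink {
        fn request_justification(&mut self, hash: u64) {
            println!("sync should request a justification for {:#x}", hash);
        }
    }

    let (tx, rx) = mpsc::channel();
    let mut sender = BufferedLinkSender { tx };
    let mut receiver = BufferedLinkReceiver { rx };

    // The import worker reports through the buffered sender...
    sender.request_justification(0xdead_beef);
    // ...and the sync side later drains the queue onto its own `Link`.
    receiver.poll_actions(&mut PrintLink);
}

Buffering like this keeps the background import task decoupled from the sync code that ultimately implements `Link`: the worker only ever touches a channel, and the sync side replays the queued actions at its own pace via `poll_actions`.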
@@ -154,10 +138,6 @@ impl BufferedLinkReceiver { link.justification_imported(who, &hash, number, success), BlockImportWorkerMsg::RequestJustification(hash, number) => link.request_justification(&hash, number), - BlockImportWorkerMsg::FinalityProofImported(who, block, result) => - link.finality_proof_imported(who, block, result), - BlockImportWorkerMsg::RequestFinalityProof(hash, number) => - link.request_finality_proof(&hash, number), } } } diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 988aa7a816c4244f861ff0ad59521a27d45a1fda..10fe8a2b315804fd850b9071327e00dfc67de852 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -49,7 +49,7 @@ mod metrics; pub use self::error::Error; pub use block_import::{ BlockImport, BlockOrigin, ForkChoiceStrategy, ImportedAux, BlockImportParams, BlockCheckParams, - ImportResult, JustificationImport, FinalityProofImport, + ImportResult, JustificationImport, }; pub use select_chain::SelectChain; pub use sp_state_machine::Backend as StateBackend; diff --git a/primitives/consensus/common/src/metrics.rs b/primitives/consensus/common/src/metrics.rs index a35b7c4968f7f6ee0d4e1995efbf1b9be8851f46..6e6b582e12594f489b2c39a04ff48c59edca41a1 100644 --- a/primitives/consensus/common/src/metrics.rs +++ b/primitives/consensus/common/src/metrics.rs @@ -30,7 +30,6 @@ pub(crate) struct Metrics { pub import_queue_processed: CounterVec, pub block_verification_time: HistogramVec, pub block_verification_and_import_time: Histogram, - pub finality_proof_import_time: Histogram, pub justification_import_time: Histogram, } @@ -63,15 +62,6 @@ impl Metrics { )?, registry, )?, - finality_proof_import_time: register( - Histogram::with_opts( - HistogramOpts::new( - "finality_proof_import_time", - "Time taken to import finality proofs", - ), - )?, - registry, - )?, justification_import_time: register( Histogram::with_opts( HistogramOpts::new( diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index f6989a0df4f0f3572c814ba0c6bb8abfc4a88709..1d0ff4f20828219f4107f5325e6eb64f9ee898f9 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -27,10 +27,10 @@ base58 = { version = "0.1.0", optional = true } rand = { version = "0.7.3", optional = true, features = ["small_rng"] } substrate-bip39 = { version = "0.4.2", optional = true } tiny-bip39 = { version = "0.8", optional = true } -regex = { version = "1.3.1", optional = true } +regex = { version = "1.4.2", optional = true } num-traits = { version = "0.2.8", default-features = false } -zeroize = { version = "1.0.0", default-features = false } -secrecy = { version = "0.6.0", default-features = false } +zeroize = { version = "1.2.0", default-features = false } +secrecy = { version = "0.7.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false, optional = true } parking_lot = { version = "0.10.0", optional = true } sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } @@ -96,11 +96,11 @@ std = [ "base58", "substrate-bip39", "tiny-bip39", - "serde", "byteorder/std", "rand", "sha2/std", "schnorrkel/std", + "schnorrkel/serde", "regex", "num-traits/std", "tiny-keccak", diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 2f34347a2d5e0b478132fbaca10f876b2a8211ea..f2101a6712429c01fc902411ef17b6f4f88fbb61 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -506,6 +506,10 @@ ss58_address_format!( (36, "centrifuge", 
"Centrifuge Chain mainnet, standard account (*25519).") NodleAccount => (37, "nodle", "Nodle Chain mainnet, standard account (*25519).") + KiltAccount => + (38, "kilt", "KILT Chain mainnet, standard account (*25519).") + PolimecAccount => + (41, "poli", "Polimec Chain mainnet, standard account (*25519).") SubstrateAccount => (42, "substrate", "Any Substrate network, standard account (*25519).") Reserved43 => @@ -1029,6 +1033,7 @@ pub trait CryptoType { Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode, PassByInner, crate::RuntimeDebug )] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct KeyTypeId(pub [u8; 4]); impl From for KeyTypeId { @@ -1058,10 +1063,12 @@ impl<'a> TryFrom<&'a str> for KeyTypeId { /// An identifier for a specific cryptographic algorithm used by a key pair #[derive(Debug, Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct CryptoTypeId(pub [u8; 4]); /// A type alias of CryptoTypeId & a public key #[derive(Debug, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct CryptoTypePublicPair(pub CryptoTypeId, pub Vec); #[cfg(feature = "std")] diff --git a/primitives/core/src/hashing.rs b/primitives/core/src/hashing.rs index 98dc0c2efc5972226ff6999e6267d18a938d6e4a..6807da02feb07e84144db9ba944d46d2d476b1f0 100644 --- a/primitives/core/src/hashing.rs +++ b/primitives/core/src/hashing.rs @@ -16,6 +16,11 @@ // limitations under the License. //! Hashing functions. +//! +//! This module is gated by `full-crypto` feature. If you intend to use any of the functions +//! defined here within your runtime, you should most likely rather use [sp_io::hashing] instead, +//! unless you know what you're doing. Using `sp_io` will be more performant, since instead of +//! computing the hash in WASM it delegates that computation to the host client. use blake2_rfc; use sha2::{Digest, Sha256}; diff --git a/primitives/core/src/offchain/testing.rs b/primitives/core/src/offchain/testing.rs index 3fe34cc0cfa7b76cdc4b62c4c7279eca2118e650..5256f417711b9272b1e4921b5112287147d98a5e 100644 --- a/primitives/core/src/offchain/testing.rs +++ b/primitives/core/src/offchain/testing.rs @@ -70,6 +70,8 @@ pub struct TestPersistentOffchainDB { } impl TestPersistentOffchainDB { + const PREFIX: &'static [u8] = b""; + /// Create a new and empty offchain storage db for persistent items pub fn new() -> Self { Self { @@ -82,11 +84,16 @@ impl TestPersistentOffchainDB { let mut me = self.persistent.write(); for ((_prefix, key), value_operation) in changes.drain() { match value_operation { - OffchainOverlayedChange::SetValue(val) => me.set(b"", key.as_slice(), val.as_slice()), - OffchainOverlayedChange::Remove => me.remove(b"", key.as_slice()), + OffchainOverlayedChange::SetValue(val) => me.set(Self::PREFIX, key.as_slice(), val.as_slice()), + OffchainOverlayedChange::Remove => me.remove(Self::PREFIX, key.as_slice()), } } } + + /// Retrieve a key from the test backend. 
+ pub fn get(&self, key: &[u8]) -> Option> { + OffchainStorage::get(self, Self::PREFIX, key) + } } impl OffchainStorage for TestPersistentOffchainDB { @@ -266,8 +273,8 @@ impl offchain::Externalities for TestOffchainExt { fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { let state = self.0.read(); match kind { - StorageKind::LOCAL => state.local_storage.get(b"", key), - StorageKind::PERSISTENT => state.persistent_storage.get(b"", key), + StorageKind::LOCAL => state.local_storage.get(TestPersistentOffchainDB::PREFIX, key), + StorageKind::PERSISTENT => state.persistent_storage.get(key), } } diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 388482964f18c9a49fdeda1e5a60d5a1e44dc1a6..6869969f4ba138d618389e10b0ab603fffd01a5c 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -137,7 +137,17 @@ pub trait Externalities: ExtensionStore { ) -> Option>; /// Clear an entire child storage. - fn kill_child_storage(&mut self, child_info: &ChildInfo); + /// + /// Deletes all keys from the overlay and up to `limit` keys from the backend. No + /// limit is applied if `limit` is `None`. Returns `true` if the child trie was + /// removed completely and `false` if there are remaining keys after the function + /// returns. + /// + /// # Note + /// + /// An implementation is free to delete more keys than the specified limit as long as + /// it is able to do that in constant time. + fn kill_child_storage(&mut self, child_info: &ChildInfo, limit: Option) -> bool; /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index e8483b2ef68c5c9c19c3adaab84a823ee4a9b36b..e470461d60b8c9438b60b2f565c3ffec039a1e9c 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -30,7 +30,7 @@ sp-tracing = { version = "2.0.0", default-features = false, path = "../tracing" log = { version = "0.4.8", optional = true } futures = { version = "0.3.1", features = ["thread-pool"], optional = true } parking_lot = { version = "0.10.0", optional = true } -tracing = { version = "0.1.19", default-features = false } +tracing = { version = "0.1.22", default-features = false } tracing-core = { version = "0.1.17", default-features = false} [features] diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 382a0c4b3bd6aed7b4db675530d0d5c8574915cb..023bf7dcb30834e3dc5c1cbc74db8a145df4e6df 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -279,7 +279,35 @@ pub trait DefaultChildStorage { storage_key: &[u8], ) { let child_info = ChildInfo::new_default(storage_key); - self.kill_child_storage(&child_info); + self.kill_child_storage(&child_info, None); + } + + /// Clear a child storage key. + /// + /// Deletes all keys from the overlay and up to `limit` keys from the backend if + /// it is set to `Some`. No limit is applied when `limit` is set to `None`. + /// + /// The limit can be used to partially delete a child trie in case it is too large + /// to delete in one go (block). + /// + /// It returns false iff some keys are remaining in + /// the child trie after the functions returns. + /// + /// # Note + /// + /// Please note that keys that are residing in the overlay for that child trie when + /// issuing this call are all deleted without counting towards the `limit`. Only keys + /// written during the current block are part of the overlay. 
Deleting with a `limit` + /// mostly makes sense with an empty overlay for that child trie. + /// + /// Calling this function multiple times per block for the same `storage_key` does + /// not make much sense because it is not cumulative when called inside the same block. + /// Use this function to distribute the deletion of a single child trie across multiple + /// blocks. + #[version(2)] + fn storage_kill(&mut self, storage_key: &[u8], limit: Option) -> bool { + let child_info = ChildInfo::new_default(storage_key); + self.kill_child_storage(&child_info, limit) } /// Check a child storage key. @@ -707,6 +735,11 @@ pub trait Hashing { sp_core::hashing::keccak_256(data) } + /// Conduct a 512-bit Keccak hash. + fn keccak_512(data: &[u8]) -> [u8; 64] { + sp_core::hashing::keccak_512(data) + } + /// Conduct a 256-bit Sha2 hash. fn sha2_256(data: &[u8]) -> [u8; 32] { sp_core::hashing::sha2_256(data) @@ -1093,7 +1126,7 @@ mod tracing_setup { }; use super::{wasm_tracing, Crossing}; - const TRACING_SET : AtomicBool = AtomicBool::new(false); + static TRACING_SET: AtomicBool = AtomicBool::new(false); /// The PassingTracingSubscriber implements `tracing_core::Subscriber` diff --git a/primitives/keystore/Cargo.toml b/primitives/keystore/Cargo.toml index d53d1ebd533c8ab324edd416dd5464d48eccf48a..deffc2ccf9d3a0ababe6fb91cccce62a55476bea 100644 --- a/primitives/keystore/Cargo.toml +++ b/primitives/keystore/Cargo.toml @@ -20,10 +20,19 @@ futures = { version = "0.3.1" } schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } merlin = { version = "2.0", default-features = false } parking_lot = { version = "0.10.0", default-features = false } - +serde = { version = "1.0", optional = true} sp-core = { version = "2.0.0", path = "../core" } sp-externalities = { version = "0.8.0", path = "../externalities", default-features = false } [dev-dependencies] rand = "0.7.2" rand_chacha = "0.2.2" + + +[features] +default = ["std"] +std = [ + "serde", + "schnorrkel/std", + "schnorrkel/serde", +] diff --git a/primitives/keystore/src/vrf.rs b/primitives/keystore/src/vrf.rs index 750ca0eac6be7eafc04ee117297f4011a8b4e156..9c1ac92738dcae654df316fb7f116f8b43c813f2 100644 --- a/primitives/keystore/src/vrf.rs +++ b/primitives/keystore/src/vrf.rs @@ -20,9 +20,11 @@ use codec::Encode; use merlin::Transcript; use schnorrkel::vrf::{VRFOutput, VRFProof}; + /// An enum whose variants represent possible /// accepted values to construct the VRF transcript #[derive(Clone, Encode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub enum VRFTranscriptValue { /// Value is an array of bytes Bytes(Vec), @@ -38,6 +40,7 @@ pub struct VRFTranscriptData { pub items: Vec<(&'static str, VRFTranscriptValue)>, } /// VRF signature data +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct VRFSignature { /// The VRFOutput serialized pub output: VRFOutput, diff --git a/primitives/npos-elections/compact/src/lib.rs b/primitives/npos-elections/compact/src/lib.rs index b35c407c40cd536e2d0bc0a466e04bc3577a7c4a..22997e4f616c7ba6e8a841a14c68e4a0366b5032 100644 --- a/primitives/npos-elections/compact/src/lib.rs +++ b/primitives/npos-elections/compact/src/lib.rs @@ -58,8 +58,8 @@ pub(crate) fn syn_err(message: &'static str) -> syn::Error { /// /// The given struct provides function to convert from/to Assignment: /// -/// - [`from_assignment()`]. -/// - [`fn into_assignment()`]. 
+/// - `fn from_assignment<..>(..)` +/// - `fn into_assignment<..>(..)` /// /// The generated struct is by default deriving both `Encode` and `Decode`. This is okay but could /// lead to many 0s in the solution. If prefixed with `#[compact]`, then a custom compact encoding diff --git a/primitives/npos-elections/src/balancing.rs b/primitives/npos-elections/src/balancing.rs index 04083cc9b0d436e79715b986eaa8ac11a9edcfa3..517ac5c03f12ea0a97726e2f98219f7c188f0d20 100644 --- a/primitives/npos-elections/src/balancing.rs +++ b/primitives/npos-elections/src/balancing.rs @@ -36,7 +36,7 @@ use sp_std::prelude::*; /// change has been made (`difference = 0`). /// /// In almost all cases, a balanced solution will have a better score than an unbalanced solution, -/// yet this is not 100% guaranteed because the first element of a [`ElectionScore`] does not +/// yet this is not 100% guaranteed because the first element of a [`crate::ElectionScore`] does not /// directly related to balancing. /// /// Note that some reference implementation adopt an approach in which voters are balanced randomly diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index 11951d2065989abd487e4b648d454cd8427336aa..2c7d133529c9df92461cc84700b76ac4a2b489a1 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -18,10 +18,10 @@ //! - [`seq_phragmen`]: Implements the Phragmén Sequential Method. An un-ranked, relatively fast //! election method that ensures PJR, but does not provide a constant factor approximation of the //! maximin problem. -//! - [`phragmms`]: Implements a hybrid approach inspired by Phragmén which is executed faster but +//! - [`phragmms()`]: Implements a hybrid approach inspired by Phragmén which is executed faster but //! it can achieve a constant factor approximation of the maximin problem, similar to that of the //! MMS algorithm. -//! - [`balance_solution`]: Implements the star balancing algorithm. This iterative process can push +//! - [`balance`]: Implements the star balancing algorithm. This iterative process can push //! a solution toward being more `balances`, which in turn can increase its score. //! //! ### Terminology @@ -70,7 +70,7 @@ //! `StakedAssignment`. //! //! -//! More information can be found at: https://arxiv.org/abs/2004.12990 +//! More information can be found at: #![cfg_attr(not(feature = "std"), no_std)] @@ -283,7 +283,7 @@ impl Voter { }) } - /// Same as [`try_normalize`] but the normalization is only limited between elected edges. + /// Same as [`Self::try_normalize`] but the normalization is only limited between elected edges. pub fn try_normalize_elected(&mut self) -> Result<(), &'static str> { let elected_edge_weights = self .edges @@ -629,7 +629,7 @@ pub(crate) fn setup_inputs( }) .collect::>>(); - let voters = initial_voters.into_iter().map(|(who, voter_stake, votes)| { + let voters = initial_voters.into_iter().filter_map(|(who, voter_stake, votes)| { let mut edges: Vec> = Vec::with_capacity(votes.len()); for v in votes { if edges.iter().any(|e| e.who == v) { @@ -650,12 +650,18 @@ pub(crate) fn setup_inputs( ); } // else {} would be wrong votes. We don't really care about it. 
} - Voter { - who, - edges: edges, - budget: voter_stake.into(), - load: Rational128::zero(), + if edges.is_empty() { + None + } + else { + Some(Voter { + who, + edges: edges, + budget: voter_stake.into(), + load: Rational128::zero(), + }) } + }).collect::>(); (candidates, voters,) diff --git a/primitives/npos-elections/src/phragmen.rs b/primitives/npos-elections/src/phragmen.rs index cfbeed1cdd3fba411434a01cc88fbc82c74861b6..135f992aba783ccd830cdd701b1609a0087c1a92 100644 --- a/primitives/npos-elections/src/phragmen.rs +++ b/primitives/npos-elections/src/phragmen.rs @@ -108,9 +108,8 @@ pub fn seq_phragmen( /// `seq_phragmen` for more information. This function is left public in case a crate needs to use /// the implementation in a custom way. /// -/// To create th inputs needed for this function, see [`crate::setup_inputs`]. -/// /// This can only fail if the normalization fails. +// To create the inputs needed for this function, see [`crate::setup_inputs`]. pub fn seq_phragmen_core( rounds: usize, candidates: Vec>, diff --git a/primitives/npos-elections/src/reduce.rs b/primitives/npos-elections/src/reduce.rs index 17d7dd1290f7d5e122799231ec354a93bf066438..a96a2ed8457dd26b7a8b8b91dae34626bba19779 100644 --- a/primitives/npos-elections/src/reduce.rs +++ b/primitives/npos-elections/src/reduce.rs @@ -45,7 +45,7 @@ //! //! ### Resources: //! -//! 1. https://hackmd.io/JOn9x98iS0e0DPWQ87zGWg?view +//! 1. use crate::node::{Node, NodeId, NodeRef, NodeRole}; use crate::{ExtendedBalance, IdentifierT, StakedAssignment}; diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index dc7a1a5fdfb979ce6ee54695254f3d1701100a34..79f95a469adf42ed49221689b1f0edf85812fa38 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -72,6 +72,46 @@ fn float_phragmen_poc_works() { ); } +#[test] +fn phragmen_core_test_without_edges() { + let candidates = vec![1, 2, 3]; + let voters = vec![ + (10, 10, vec![]), + (20, 20, vec![]), + (30, 30, vec![]), + ]; + + let (candidates, voters) = setup_inputs(candidates, voters); + + assert_eq!( + voters + .iter() + .map(|v| ( + v.who, + v.budget, + (v.edges.iter().map(|e| (e.who, e.weight)).collect::>()), + )) + .collect::>(), + vec![] + ); + + assert_eq!( + candidates + .iter() + .map(|c_ptr| ( + c_ptr.borrow().who, + c_ptr.borrow().elected, + c_ptr.borrow().round, + c_ptr.borrow().backed_stake, + )).collect::>(), + vec![ + (1, false, 0, 0), + (2, false, 0, 0), + (3, false, 0, 0), + ] + ); +} + #[test] fn phragmen_core_poc_works() { let candidates = vec![1, 2, 3]; diff --git a/primitives/panic-handler/Cargo.toml b/primitives/panic-handler/Cargo.toml index acf454b960a7c861924c4d706582a53d0d71b1a0..0baba8ee7abab711f64966c109eb190ba8d381c4 100644 --- a/primitives/panic-handler/Cargo.toml +++ b/primitives/panic-handler/Cargo.toml @@ -15,4 +15,3 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] backtrace = "0.3.38" -log = "0.4.8" diff --git a/primitives/rpc/src/number.rs b/primitives/rpc/src/number.rs index 3d7e74753526c9a756cdd1a9b9f4d9f0d78a1b6d..0a81a34db8f7585fd8fee16773e44fc0f4c0fc7f 100644 --- a/primitives/rpc/src/number.rs +++ b/primitives/rpc/src/number.rs @@ -18,7 +18,7 @@ //! A number type that can be serialized both as a number or a string that encodes a number in a //! string. 
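The primitives/rpc/src/number.rs hunk that follows replaces hand-written max_value() comparisons with TryInto, letting the standard library perform the range check. A std-only sketch of that conversion pattern, using placeholder names rather than the actual sp-rpc types:

use std::convert::TryInto;

#[derive(Debug, PartialEq)]
struct TryFromIntError;

fn to_u32(value: u64) -> Result<u32, TryFromIntError> {
    // `try_into` performs the bounds check; any overflow is mapped to a single error type.
    value.try_into().map_err(|_| TryFromIntError)
}

fn main() {
    assert_eq!(to_u32(7), Ok(7));
    assert!(to_u32(u64::max_value()).is_err());
}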
-use std::{convert::TryFrom, fmt::Debug}; +use std::{convert::{TryFrom, TryInto}, fmt::Debug}; use serde::{Serialize, Deserialize}; use sp_core::U256; @@ -67,24 +67,27 @@ pub struct TryFromIntError(pub(crate) ()); impl TryFrom for u32 { type Error = TryFromIntError; fn try_from(num_or_hex: NumberOrHex) -> Result { - let num_or_hex = num_or_hex.into_u256(); - if num_or_hex > U256::from(u32::max_value()) { - return Err(TryFromIntError(())); - } else { - Ok(num_or_hex.as_u32()) - } + num_or_hex.into_u256().try_into().map_err(|_| TryFromIntError(())) } } impl TryFrom for u64 { type Error = TryFromIntError; fn try_from(num_or_hex: NumberOrHex) -> Result { - let num_or_hex = num_or_hex.into_u256(); - if num_or_hex > U256::from(u64::max_value()) { - return Err(TryFromIntError(())); - } else { - Ok(num_or_hex.as_u64()) - } + num_or_hex.into_u256().try_into().map_err(|_| TryFromIntError(())) + } +} + +impl TryFrom for u128 { + type Error = TryFromIntError; + fn try_from(num_or_hex: NumberOrHex) -> Result { + num_or_hex.into_u256().try_into().map_err(|_| TryFromIntError(())) + } +} + +impl From for U256 { + fn from(num_or_hex: NumberOrHex) -> U256 { + num_or_hex.into_u256() } } diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index bc36098f05a54854591562980e297f8b2e8f32d3..180914e89dd65a7b8be94433b4747e0556796b50 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -23,6 +23,7 @@ codec = { package = "parity-scale-codec", version = "1.3.1", default-features = static_assertions = "1.0.0" primitive-types = { version = "0.7.0", default-features = false } sp-storage = { version = "2.0.0", default-features = false, path = "../storage" } +impl-trait-for-tuples = "0.1.3" [dev-dependencies] sp-runtime-interface-test-wasm = { version = "2.0.0", path = "test-wasm" } @@ -30,7 +31,7 @@ sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" sp-core = { version = "2.0.0", path = "../core" } sp-io = { version = "2.0.0", path = "../io" } rustversion = "1.0.0" -trybuild = "1.0.23" +trybuild = { git = "https://github.com/bkchr/trybuild.git", branch = "bkchr-use-workspace-cargo-lock" } [features] default = [ "std" ] diff --git a/primitives/runtime-interface/src/impls.rs b/primitives/runtime-interface/src/impls.rs index da57cf086beef3497fac5e8104269e4c5e49d248..7d84085a9e49a973b9eb1b1ce8ac49a325a1c8a0 100644 --- a/primitives/runtime-interface/src/impls.rs +++ b/primitives/runtime-interface/src/impls.rs @@ -365,7 +365,9 @@ impl PassBy for Option { type PassBy = Codec; } -impl PassBy for (u32, u32, u32, u32) { +#[impl_trait_for_tuples::impl_for_tuples(30)] +#[tuple_types_no_default_trait_bound] +impl PassBy for Tuple where Self: codec::Codec { type PassBy = Codec; } diff --git a/primitives/runtime-interface/src/lib.rs b/primitives/runtime-interface/src/lib.rs index dd625a4a25346e4947d19d79a20cf6ae8d2a6696..7a7b78bc45b4b0feb6e5f64bf8d230a204ace044 100644 --- a/primitives/runtime-interface/src/lib.rs +++ b/primitives/runtime-interface/src/lib.rs @@ -99,7 +99,7 @@ //! | `*const T` | `u32` | `Identity` | //! | `Option` | `u64` | `let e = v.encode();`

e.len() 32bit << 32 | e.as_ptr() 32bit | //! | [`T where T: PassBy`](./pass_by#Inner) | Depends on inner | Depends on inner | -//! | [`T where T:PassBy`](./pass_by#Codec)|`u64`|v.len() 32bit << 32 |v.as_ptr() 32bit| +//! | [`T where T: PassBy`](./pass_by#Codec)|`u64`|v.len() 32bit << 32 |v.as_ptr() 32bit| //! //! `Identity` means that the value is converted directly into the corresponding FFI type. diff --git a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml index 59790eb172eb3da4311b383ea8c430983af4c259..eba557de5dbab7bd030f618bb6c1be85ee8b5839 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml +++ b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml @@ -19,7 +19,7 @@ sp-io = { version = "2.0.0", default-features = false, path = "../../io" } sp-core = { version = "2.0.0", default-features = false, path = "../../core" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "3.0.0", path = "../../../utils/wasm-builder" } [features] default = [ "std" ] diff --git a/primitives/runtime-interface/test-wasm-deprecated/build.rs b/primitives/runtime-interface/test-wasm-deprecated/build.rs index 4f111bc9930078d09f02a1e189e2edcd26c993e4..8a0b4d7a0c15745cbc743130b522ddf693d3822b 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/build.rs +++ b/primitives/runtime-interface/test-wasm-deprecated/build.rs @@ -15,12 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../../utils/wasm-builder") .export_heap_base() .import_memory() .build() diff --git a/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs b/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs index 174cdb8cdf85a394073726fa3e71196a93c37702..ae0697b2938f4719747ad9236e27ced83e00bbac 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs +++ b/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs @@ -26,8 +26,8 @@ use sp_runtime_interface::runtime_interface; #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] -/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. pub fn wasm_binary_unwrap() -> &'static [u8] { WASM_BINARY.expect("Development wasm binary is not available. 
Testing is only \ supported with the flag disabled.") diff --git a/primitives/runtime-interface/test-wasm/Cargo.toml b/primitives/runtime-interface/test-wasm/Cargo.toml index 39c8df976a5baf76ba37b89a0509da32b3e84260..3cf36f95145e6a3ae10858a36c6db0fd56323582 100644 --- a/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/primitives/runtime-interface/test-wasm/Cargo.toml @@ -19,7 +19,7 @@ sp-io = { version = "2.0.0", default-features = false, path = "../../io" } sp-core = { version = "2.0.0", default-features = false, path = "../../core" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "3.0.0", path = "../../../utils/wasm-builder" } [features] default = [ "std" ] diff --git a/primitives/runtime-interface/test-wasm/build.rs b/primitives/runtime-interface/test-wasm/build.rs index 4f111bc9930078d09f02a1e189e2edcd26c993e4..8a0b4d7a0c15745cbc743130b522ddf693d3822b 100644 --- a/primitives/runtime-interface/test-wasm/build.rs +++ b/primitives/runtime-interface/test-wasm/build.rs @@ -15,12 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../../utils/wasm-builder") .export_heap_base() .import_memory() .build() diff --git a/primitives/runtime-interface/test-wasm/src/lib.rs b/primitives/runtime-interface/test-wasm/src/lib.rs index 28895df2214d17a63b6310579eed4adf5f843484..852be609fef741a7b588aab013fc60dcaee63066 100644 --- a/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/primitives/runtime-interface/test-wasm/src/lib.rs @@ -30,8 +30,8 @@ use sp_core::{sr25519::Public, wasm_export_functions}; #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] -/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. pub fn wasm_binary_unwrap() -> &'static [u8] { WASM_BINARY.expect("Development wasm binary is not available. Testing is only \ supported with the flag disabled.") @@ -120,6 +120,16 @@ pub trait TestApi { fn test_versionning(&self, data: u32) -> bool { data == 42 } + + /// Returns the input values as tuple. + fn return_input_as_tuple( + a: Vec, + b: u32, + c: Option>, + d: u8, + ) -> (Vec, u32, Option>, u8) { + (a, b, c, d) + } } /// This function is not used, but we require it for the compiler to include `sp-io`. @@ -258,4 +268,18 @@ wasm_export_functions! 
{ assert!(!test_api::test_versionning(50)); assert!(!test_api::test_versionning(102)); } + + fn test_return_input_as_tuple() { + let a = vec![1, 3, 4, 5]; + let b = 10000; + let c = Some(vec![2, 3]); + let d = 5; + + let res = test_api::return_input_as_tuple(a.clone(), b, c.clone(), d); + + assert_eq!(a, res.0); + assert_eq!(b, res.1); + assert_eq!(c, res.2); + assert_eq!(d, res.3); + } } diff --git a/primitives/runtime-interface/test/Cargo.toml b/primitives/runtime-interface/test/Cargo.toml index d802f9cb6b39af77b69dff6bd9732d32e253f8bd..d6da3db4b69b1cdc7506b457bd729f7acca07c29 100644 --- a/primitives/runtime-interface/test/Cargo.toml +++ b/primitives/runtime-interface/test/Cargo.toml @@ -20,5 +20,5 @@ sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machin sp-runtime = { version = "2.0.0", path = "../../runtime" } sp-core = { version = "2.0.0", path = "../../core" } sp-io = { version = "2.0.0", path = "../../io" } -tracing = "0.1.19" +tracing = "0.1.22" tracing-core = "0.1.17" diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index c66609daa2f29695ed3d05435e5c5638744c8963..1f079e86ff3d805b7d304bae486054fc80b3f822 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -208,4 +208,9 @@ fn test_tracing() { let inner = subscriber.0.lock().unwrap(); assert!(inner.spans.contains("return_input_version_1")); -} \ No newline at end of file +} + +#[test] +fn test_return_input_as_tuple() { + call_wasm_method::(&wasm_binary_unwrap()[..], "test_return_input_as_tuple"); +} diff --git a/primitives/runtime-interface/tests/ui.rs b/primitives/runtime-interface/tests/ui.rs index 2f7fd6d06bcd34c3724d6fa39422d4f7890392cc..f23c7291e8ef7ad11229baa4b277bc2f12fdf06c 100644 --- a/primitives/runtime-interface/tests/ui.rs +++ b/primitives/runtime-interface/tests/ui.rs @@ -21,7 +21,7 @@ use std::env; #[test] fn ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
- env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/ui/*.rs"); diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 6579a17c77fec015dbbb99e1ce0ff707c2e5c08b..9c3286cd4750d26cc51c879a9e76dd0c79405dc1 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -26,7 +26,6 @@ log = { version = "0.4.8", optional = true } paste = "0.1.6" rand = { version = "0.7.2", optional = true } impl-trait-for-tuples = "0.1.3" -sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } hash256-std-hasher = { version = "0.15.2", default-features = false } either = { version = "1.5", default-features = false } @@ -50,7 +49,6 @@ std = [ "sp-std/std", "sp-io/std", "serde", - "sp-inherents/std", "parity-util-mem/std", "hash256-std-hasher/std", "either/use_std", diff --git a/primitives/runtime/src/offchain/storage.rs b/primitives/runtime/src/offchain/storage.rs index 2f62d400c0b950b326f70edae145de259eb16e7d..e39514686e17c6665ebe5a377796d7d115e7cc2d 100644 --- a/primitives/runtime/src/offchain/storage.rs +++ b/primitives/runtime/src/offchain/storage.rs @@ -105,7 +105,6 @@ mod tests { use sp_io::TestExternalities; use sp_core::offchain::{ OffchainExt, - OffchainStorage, testing, }; @@ -125,7 +124,7 @@ mod tests { assert_eq!(val.get::(), Some(Some(15_u32))); assert_eq!(val.get::>(), Some(None)); assert_eq!( - state.read().persistent_storage.get(b"", b"testval"), + state.read().persistent_storage.get(b"testval"), Some(vec![15_u8, 0, 0, 0]) ); }) @@ -148,7 +147,7 @@ mod tests { assert_eq!(result, Ok(Ok(16_u32))); assert_eq!(val.get::(), Some(Some(16_u32))); assert_eq!( - state.read().persistent_storage.get(b"", b"testval"), + state.read().persistent_storage.get(b"testval"), Some(vec![16_u8, 0, 0, 0]) ); diff --git a/primitives/runtime/src/offchain/storage_lock.rs b/primitives/runtime/src/offchain/storage_lock.rs index a3838f21fd13d55ce3dc71386597cb25741753fd..0d9cf835c15e3a8861ab5eeac4cc1b9715451ab4 100644 --- a/primitives/runtime/src/offchain/storage_lock.rs +++ b/primitives/runtime/src/offchain/storage_lock.rs @@ -438,11 +438,11 @@ pub trait BlockNumberProvider { /// /// In case of using crate `sp_runtime` without the crate `frame` /// system, it is already implemented for - /// `frame_system::Module` as: + /// `frame_system::Module` as: /// /// ```ignore /// fn current_block_number() -> Self { - /// frame_system::Module::block_number() + /// frame_system::Module::block_number() /// } /// ``` /// . 
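The BlockNumberProvider documentation above mentions supplying a provider when frame_system is not available. A deliberately simplified, self-contained sketch of that idea; the trait is redefined locally here and is not the exact sp_runtime definition:

/// Simplified stand-in for the provider trait documented above.
trait BlockNumberProvider {
    type BlockNumber;
    fn current_block_number() -> Self::BlockNumber;
}

/// A provider that would normally query the chain; here it returns a constant.
struct FixedProvider;

impl BlockNumberProvider for FixedProvider {
    type BlockNumber = u64;
    fn current_block_number() -> u64 {
        42
    }
}

fn main() {
    assert_eq!(FixedProvider::current_block_number(), 42);
}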
@@ -452,7 +452,7 @@ pub trait BlockNumberProvider { #[cfg(test)] mod tests { use super::*; - use sp_core::offchain::{testing, OffchainExt, OffchainStorage}; + use sp_core::offchain::{testing, OffchainExt}; use sp_io::TestExternalities; const VAL_1: u32 = 0u32; @@ -485,7 +485,7 @@ mod tests { } }); // lock must have been cleared at this point - assert_eq!(state.read().persistent_storage.get(b"", b"lock_1"), None); + assert_eq!(state.read().persistent_storage.get(b"lock_1"), None); } #[test] @@ -508,7 +508,7 @@ mod tests { guard.forget(); }); // lock must have been cleared at this point - let opt = state.read().persistent_storage.get(b"", b"lock_2"); + let opt = state.read().persistent_storage.get(b"lock_2"); assert!(opt.is_some()); } @@ -540,7 +540,7 @@ mod tests { }); // lock must have been cleared at this point - let opt = state.read().persistent_storage.get(b"", b"lock_3"); + let opt = state.read().persistent_storage.get(b"lock_3"); assert!(opt.is_some()); } @@ -587,7 +587,7 @@ mod tests { }); // lock must have been cleared at this point - let opt = state.read().persistent_storage.get(b"", b"lock_4"); + let opt = state.read().persistent_storage.get(b"lock_4"); assert_eq!(opt.unwrap(), vec![132_u8, 3u8, 0, 0, 0, 0, 0, 0]); // 132 + 256 * 3 = 900 } } diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 4ce9ac0afa9a3990d63552acd7b7d5dabdd4acf0..d475be3579baf600b82848aa6498290bc29730a7 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -693,7 +693,7 @@ pub trait Dispatchable { /// identifier for the caller. The origin can be empty in the case of an inherent extrinsic. type Origin; /// ... - type Trait; + type Config; /// An opaque set of information attached to the transaction. This could be constructed anywhere /// down the line in a runtime. The current Substrate runtime uses a struct with the same name /// to represent the dispatch class and weight. @@ -712,7 +712,7 @@ pub type PostDispatchInfoOf = ::PostInfo; impl Dispatchable for () { type Origin = (); - type Trait = (); + type Config = (); type Info = (); type PostInfo = (); fn dispatch(self, _origin: Self::Origin) -> crate::DispatchResultWithInfo { diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 360fe9a985682f457d0065ebecfee5846d3842f5..02151c2480e314c84895185ad29b5b2ab546b1bc 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -94,7 +94,8 @@ pub trait Backend: sp_std::fmt::Debug { ) -> Result, Self::Error>; /// Retrieve all entries keys of child storage and call `f` for each of those keys. - fn for_keys_in_child_storage( + /// Aborts as soon as `f` returns false. 
+ fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, @@ -263,12 +264,12 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { (*self).child_storage(child_info, key) } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, ) { - (*self).for_keys_in_child_storage(child_info, f) + (*self).apply_to_child_keys_while(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 5e3c9bed64f10f2d70fac9493035b48fe3620366..9de75785e4598f146dd1eae1f3ba434424561800 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -210,8 +210,10 @@ impl Externalities for BasicExternalities { fn kill_child_storage( &mut self, child_info: &ChildInfo, - ) { + _limit: Option, + ) -> bool { self.inner.children_default.remove(child_info.storage_key()); + true } fn clear_prefix(&mut self, prefix: &[u8]) { @@ -407,7 +409,7 @@ mod tests { ext.clear_child_storage(child_info, b"dog"); assert_eq!(ext.child_storage(child_info, b"dog"), None); - ext.kill_child_storage(child_info); + ext.kill_child_storage(child_info, None); assert_eq!(ext.child_storage(child_info, b"doe"), None); } diff --git a/primitives/state-machine/src/error.rs b/primitives/state-machine/src/error.rs index 0b02c68f79f5c0d5621b8ccaf03e98ae0af5a372..f20f9e530dc7f6f74730c2c29f9150c44f09d922 100644 --- a/primitives/state-machine/src/error.rs +++ b/primitives/state-machine/src/error.rs @@ -32,18 +32,18 @@ impl Error for T {} /// would not be executed unless externalities were available. This is included for completeness, /// and as a transition away from the pre-existing framework. #[derive(Debug, Eq, PartialEq)] +#[allow(missing_docs)] #[cfg_attr(feature = "std", derive(thiserror::Error))] pub enum ExecutionError { - /// Backend error. #[cfg_attr(feature = "std", error("Backend error {0:?}"))] Backend(crate::DefaultError), - /// The entry `:code` doesn't exist in storage so there's no way we can execute anything. + #[cfg_attr(feature = "std", error("`:code` entry does not exist in storage"))] CodeEntryDoesNotExist, - /// Backend is incompatible with execution proof generation process. + #[cfg_attr(feature = "std", error("Unable to generate proof"))] UnableToGenerateProof, - /// Invalid execution proof. 
+ #[cfg_attr(feature = "std", error("Invalid execution proof"))] InvalidProof, } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 53aab42999d5eb40cc5c7ae11d5fca8040629d41..3c4d88f3920b0b5fe86dbda3cf76ba8b1f3d6cde 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -411,18 +411,41 @@ where fn kill_child_storage( &mut self, child_info: &ChildInfo, - ) { + limit: Option, + ) -> bool { trace!(target: "state", "{:04x}: KillChild({})", self.id, HexDisplay::from(&child_info.storage_key()), ); let _guard = guard(); - self.mark_dirty(); self.overlay.clear_child_storage(child_info); - self.backend.for_keys_in_child_storage(child_info, |key| { - self.overlay.set_child_storage(child_info, key.to_vec(), None); - }); + + if let Some(limit) = limit { + let mut num_deleted: u32 = 0; + let mut all_deleted = true; + self.backend.apply_to_child_keys_while(child_info, |key| { + if num_deleted == limit { + all_deleted = false; + return false; + } + if let Some(num) = num_deleted.checked_add(1) { + num_deleted = num; + } else { + all_deleted = false; + return false; + } + self.overlay.set_child_storage(child_info, key.to_vec(), None); + true + }); + all_deleted + } else { + self.backend.apply_to_child_keys_while(child_info, |key| { + self.overlay.set_child_storage(child_info, key.to_vec(), None); + true + }); + true + } } fn clear_prefix(&mut self, prefix: &[u8]) { diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 28148b6411a13ccdd14b31ba5779f553c04a3e62..c83dce4bedf695767fa68de02c470acb4fe7ac1f 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1147,6 +1147,86 @@ mod tests { ); } + #[test] + fn limited_child_kill_works() { + let child_info = ChildInfo::new_default(b"sub1"); + let initial: HashMap<_, BTreeMap<_, _>> = map![ + Some(child_info.clone()) => map![ + b"a".to_vec() => b"0".to_vec(), + b"b".to_vec() => b"1".to_vec(), + b"c".to_vec() => b"2".to_vec(), + b"d".to_vec() => b"3".to_vec() + ], + ]; + let backend = InMemoryBackend::::from(initial); + + let mut overlay = OverlayedChanges::default(); + overlay.set_child_storage(&child_info, b"1".to_vec(), Some(b"1312".to_vec())); + overlay.set_child_storage(&child_info, b"2".to_vec(), Some(b"1312".to_vec())); + overlay.set_child_storage(&child_info, b"3".to_vec(), Some(b"1312".to_vec())); + overlay.set_child_storage(&child_info, b"4".to_vec(), Some(b"1312".to_vec())); + + { + let mut offchain_overlay = Default::default(); + let mut cache = StorageTransactionCache::default(); + let mut ext = Ext::new( + &mut overlay, + &mut offchain_overlay, + &mut cache, + &backend, + changes_trie::disabled_state::<_, u64>(), + None, + ); + assert_eq!(ext.kill_child_storage(&child_info, Some(2)), false); + } + + assert_eq!( + overlay.children() + .flat_map(|(iter, _child_info)| iter) + .map(|(k, v)| (k.clone(), v.value().clone())) + .collect::>(), + map![ + b"1".to_vec() => None.into(), + b"2".to_vec() => None.into(), + b"3".to_vec() => None.into(), + b"4".to_vec() => None.into(), + b"a".to_vec() => None.into(), + b"b".to_vec() => None.into(), + ], + ); + } + + #[test] + fn limited_child_kill_off_by_one_works() { + let child_info = ChildInfo::new_default(b"sub1"); + let initial: HashMap<_, BTreeMap<_, _>> = map![ + Some(child_info.clone()) => map![ + b"a".to_vec() => b"0".to_vec(), + b"b".to_vec() => b"1".to_vec(), + b"c".to_vec() => b"2".to_vec(), + b"d".to_vec() => b"3".to_vec() + ], + ]; + let backend = 
InMemoryBackend::::from(initial); + let mut overlay = OverlayedChanges::default(); + let mut offchain_overlay = Default::default(); + let mut cache = StorageTransactionCache::default(); + let mut ext = Ext::new( + &mut overlay, + &mut offchain_overlay, + &mut cache, + &backend, + changes_trie::disabled_state::<_, u64>(), + None, + ); + assert_eq!(ext.kill_child_storage(&child_info, Some(0)), false); + assert_eq!(ext.kill_child_storage(&child_info, Some(1)), false); + assert_eq!(ext.kill_child_storage(&child_info, Some(2)), false); + assert_eq!(ext.kill_child_storage(&child_info, Some(3)), false); + assert_eq!(ext.kill_child_storage(&child_info, Some(4)), true); + assert_eq!(ext.kill_child_storage(&child_info, Some(5)), true); + } + #[test] fn set_child_storage_works() { let child_info = ChildInfo::new_default(b"sub1"); @@ -1179,6 +1259,7 @@ mod tests { ); ext.kill_child_storage( child_info, + None, ); assert_eq!( ext.child_storage( diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 0888c561cae3081c340e70fa50e71ca736d899e7..63a027cfba06e15fb0baf444eb5926fdf3f5cad9 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -204,12 +204,12 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> self.0.child_storage(child_info, key) } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, ) { - self.0.for_keys_in_child_storage(child_info, f) + self.0.apply_to_child_keys_while(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index 1b70958145c70eddac2991590cbb105a503d0a39..2ab92f5fbb6c83c0e26011a5acbee227a9155828 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -131,7 +131,8 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< fn kill_child_storage( &mut self, _child_info: &ChildInfo, - ) { + _limit: Option, + ) -> bool { unimplemented!("kill_child_storage is not supported in ReadOnlyExternalities") } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 4eaa0870baed039e96c19097e8eeddccb4bc76e8..ffae1a02c036eed4c7bd4a07a03e5a0abb0513c9 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -113,12 +113,12 @@ impl, H: Hasher> Backend for TrieBackend where self.essence.for_key_values_with_prefix(prefix, f) } - fn for_keys_in_child_storage( + fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, ) { - self.essence.for_keys_in_child_storage(child_info, f) + self.essence.apply_to_child_keys_while(child_info, f) } fn for_child_keys_with_prefix( diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 37bbbb7cf9822f179901f9da78aa4a617e73263c..8485cb27e700a2e0bbe6da4bb339c3028c92f80b 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -190,7 +190,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Retrieve all entries keys of child storage and call `f` for each of those keys. - pub fn for_keys_in_child_storage( + /// Aborts as soon as `f` returns false. 
+ pub fn apply_to_child_keys_while bool>( &self, child_info: &ChildInfo, f: F, diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs index 8994d069e4c76cc35806e07bc4c76677427cbdb4..efb4c498f75fb0805c15bb8de05ca15db4254250 100644 --- a/primitives/tasks/src/async_externalities.rs +++ b/primitives/tasks/src/async_externalities.rs @@ -118,7 +118,8 @@ impl Externalities for AsyncExternalities { fn kill_child_storage( &mut self, _child_info: &ChildInfo, - ) { + _limit: Option, + ) -> bool { panic!("`kill_child_storage`: should not be used in async externalities!") } diff --git a/primitives/tracing/Cargo.toml b/primitives/tracing/Cargo.toml index 1000952b39fd455f18904aeca65df47a0a274604..c6d4d7b4caccd26784907567ec993e05334cd0d8 100644 --- a/primitives/tracing/Cargo.toml +++ b/primitives/tracing/Cargo.toml @@ -20,10 +20,10 @@ targets = ["x86_64-unknown-linux-gnu", "wasm32-unknown-unknown"] [dependencies] sp-std = { version = "2.0.0", path = "../std", default-features = false} codec = { version = "1.3.1", package = "parity-scale-codec", default-features = false, features = ["derive"]} -tracing = { version = "0.1.21", default-features = false } +tracing = { version = "0.1.22", default-features = false } tracing-core = { version = "0.1.17", default-features = false } log = { version = "0.4.8", optional = true } -tracing-subscriber = { version = "0.2.10", optional = true, features = ["tracing-log"] } +tracing-subscriber = { version = "0.2.15", optional = true, features = ["tracing-log"] } [features] default = [ "std" ] diff --git a/primitives/transaction-pool/Cargo.toml b/primitives/transaction-pool/Cargo.toml index 57ba3a28ac3c1b1f520925cba3c0940fa2e342cd..4247e1a50c9bc1c2d2603d33d451f16f0de0197d 100644 --- a/primitives/transaction-pool/Cargo.toml +++ b/primitives/transaction-pool/Cargo.toml @@ -14,8 +14,9 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +thiserror = { version = "1.0.21", optional = true } codec = { package = "parity-scale-codec", version = "1.3.1", optional = true } -derive_more = { version = "0.99.2", optional = true } +derive_more = { version = "0.99.11", optional = true } futures = { version = "0.3.1", optional = true } log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", features = ["derive"], optional = true} @@ -31,6 +32,7 @@ std = [ "futures", "log", "serde", + "thiserror", "sp-api/std", "sp-blockchain", "sp-runtime/std", diff --git a/primitives/transaction-pool/src/error.rs b/primitives/transaction-pool/src/error.rs index 531b397cb946c854cad5aa90f73d4ed739378057..e356df75908a79bba772a493ea68e0deabc71a2f 100644 --- a/primitives/transaction-pool/src/error.rs +++ b/primitives/transaction-pool/src/error.rs @@ -25,49 +25,49 @@ use sp_runtime::transaction_validity::{ pub type Result = std::result::Result; /// Transaction pool error type. -#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error, derive_more::From)] +#[allow(missing_docs)] pub enum Error { - /// Transaction is not verifiable yet, but might be in the future. - #[display(fmt="Unknown transaction validity: {:?}", _0)] + #[error("Unknown transaction validity: {0:?}")] UnknownTransaction(UnknownTransaction), - /// Transaction is invalid. - #[display(fmt="Invalid transaction validity: {:?}", _0)] + + #[error("Invalid transaction validity: {0:?}")] InvalidTransaction(InvalidTransaction), + /// The transaction validity returned no "provides" tag. 
/// /// Such transactions are not accepted to the pool, since we use those tags /// to define identity of transactions (occupance of the same "slot"). - #[display(fmt="The transaction does not provide any tags, so the pool can't identify it.")] + #[error("Transaction does not provide any tags, so the pool can't identify it")] NoTagsProvided, - /// The transaction is temporarily banned. - #[display(fmt="Temporarily Banned")] + + #[error("Transaction temporarily Banned")] TemporarilyBanned, - /// The transaction is already in the pool. - #[display(fmt="[{:?}] Already imported", _0)] + + #[error("[{0:?}] Already imported")] AlreadyImported(Box), - /// The transaction cannot be imported cause it's a replacement and has too low priority. - #[display(fmt="Too low priority ({} > {})", old, new)] + + #[error("Too low priority ({} > {})", old, new)] TooLowPriority { /// Transaction already in the pool. old: Priority, /// Transaction entering the pool. new: Priority }, - /// Deps cycle detected and we couldn't import transaction. - #[display(fmt="Cycle Detected")] + #[error("Transaction with cyclic dependency")] CycleDetected, - /// Transaction was dropped immediately after it got inserted. - #[display(fmt="Transaction couldn't enter the pool because of the limit.")] + + #[error("Transaction couldn't enter the pool because of the limit")] ImmediatelyDropped, - /// Invalid block id. + + #[from(ignore)] + #[error("{0}")] InvalidBlockId(String), - /// The pool is not accepting future transactions. - #[display(fmt="The pool is not accepting future transactions")] + + #[error("The pool is not accepting future transactions")] RejectedFutureTransaction, } -impl std::error::Error for Error {} - /// Transaction pool error conversion. pub trait IntoPoolError: std::error::Error + Send + Sized { /// Try to extract original `Error` diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 73a4a8029b2d71d08f6cf8c9b7a39cec003d27d4..2687d8e422796ae1ac1aeb0c293e70bc9f00bc4d 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -271,7 +271,8 @@ pub fn child_delta_trie_root( } /// Call `f` for all keys in a child trie. -pub fn for_keys_in_child_trie( +/// Aborts as soon as `f` returns false. +pub fn for_keys_in_child_trie bool, DB>( keyspace: &[u8], db: &DB, root_slice: &[u8], @@ -290,7 +291,9 @@ pub fn for_keys_in_child_trie( for x in iter { let (key, _) = x?; - f(&key); + if !f(&key) { + break; + } } Ok(()) diff --git a/primitives/utils/src/mpsc.rs b/primitives/utils/src/mpsc.rs index 70baa006bdcdc0d86b99aec71859361806f9044c..321ab72f0d2724008770f9349de2d3401d5a9b33 100644 --- a/primitives/utils/src/mpsc.rs +++ b/primitives/utils/src/mpsc.rs @@ -63,7 +63,7 @@ mod inner { /// `UNBOUNDED_CHANNELS_COUNTER` pub fn tracing_unbounded(key: &'static str) ->(TracingUnboundedSender, TracingUnboundedReceiver) { let (s, r) = mpsc::unbounded(); - (TracingUnboundedSender(key.clone(), s), TracingUnboundedReceiver(key,r)) + (TracingUnboundedSender(key, s), TracingUnboundedReceiver(key,r)) } impl TracingUnboundedSender { diff --git a/primitives/utils/src/status_sinks.rs b/primitives/utils/src/status_sinks.rs index 65a560af4eaa52a8eb3fa49cd0d5e6bc38527be7..6ca9452893f3e133787a0d4d69b219a015b0273f 100644 --- a/primitives/utils/src/status_sinks.rs +++ b/primitives/utils/src/status_sinks.rs @@ -43,6 +43,12 @@ struct YieldAfter { sender: Option>, } +impl Default for StatusSinks { + fn default() -> Self { + Self::new() + } +} + impl StatusSinks { /// Builds a new empty collection. 
pub fn new() -> StatusSinks { diff --git a/ss58-registry.json b/ss58-registry.json index 80d600ed593bf6b106c5d7d3fa8859a9be99c241..5c90856505a6e8859a0359e66e316ab61c10884c 100644 --- a/ss58-registry.json +++ b/ss58-registry.json @@ -257,10 +257,10 @@ "prefix": 30, "network": "phala", "displayName": "Phala Network", - "symbols": null, - "decimals": null, + "symbols": ["PHA"], + "decimals": [12], "standardAccount": "*25519", - "website": null + "website": "https://phala.network" }, { "prefix": 32, @@ -298,6 +298,15 @@ "standardAccount": "*25519", "website": "https://nodle.io/" }, + { + "prefix": 38, + "network": "kilt", + "displayName": "KILT Chain", + "symbols": ["KILT"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://kilt.io/" + }, { "prefix": 39, "network": "mathchain", @@ -316,6 +325,15 @@ "standardAccount": "*25519", "website": "https://mathwallet.org" }, + { + "prefix": 41, + "network": "poli", + "displayName": "Polimec Chain", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": "https://polimec.io/" + }, { "prefix": 42, "network": "substrate", diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index ddadc2cb7177d8c2645059e4c71e85000e26cba1..7606b0c1c15bcd0429acc3896857d13bd1d8bcdd 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -18,4 +18,4 @@ tokio = { version = "0.2.13", features = ["macros"] } [dev-dependencies] sc-service = { version = "0.8.0", path = "../client/service" } -trybuild = { version = "1.0", features = ["diff"] } +trybuild = { git = "https://github.com/bkchr/trybuild.git", branch = "bkchr-use-workspace-cargo-lock", features = [ "diff" ] } diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index cb6147adf25c6454418a2825eca009cf8274584c..cf1a4bcddd5b384480ed96530ac1249a3c6afb78 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -56,7 +56,7 @@ sc-executor = { version = "0.8.0", path = "../../client/executor" } substrate-test-runtime-client = { version = "2.0.0", path = "./client" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "3.0.0", path = "../../utils/wasm-builder" } [features] default = [ diff --git a/test-utils/runtime/build.rs b/test-utils/runtime/build.rs index 834551a7ba12dabe22d9e0671e8107b39e12fb39..5c9af20528a0bd9a61b5a538e97db9f0975f9ca1 100644 --- a/test-utils/runtime/build.rs +++ b/test-utils/runtime/build.rs @@ -15,12 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../utils/wasm-builder") .export_heap_base() // Note that we set the stack-size to 1MB explicitly even though it is set // to this value by default. This is because some of our tests (`restoration_of_globals`) diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index e772a28ee33a2fbfc702424c74a1abb646f5c861..ea29215a4f7e4a7578efa086256a08b6570bc659 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! The Substrate runtime. This can be compiled with #[no_std], ready for Wasm. +//! 
The Substrate runtime. This can be compiled with `#[no_std]`, ready for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -42,9 +42,11 @@ use sp_runtime::{ }, traits::{ BlindCheckable, BlakeTwo256, Block as BlockT, Extrinsic as ExtrinsicT, - GetNodeBlockType, GetRuntimeBlockType, NumberFor, Verify, IdentityLookup, + GetNodeBlockType, GetRuntimeBlockType, Verify, IdentityLookup, }, }; +#[cfg(feature = "std")] +use sp_runtime::traits::NumberFor; use sp_version::RuntimeVersion; pub use sp_core::hash::H256; #[cfg(any(feature = "std", test))] @@ -52,8 +54,9 @@ use sp_version::NativeVersion; use frame_support::{ impl_outer_origin, parameter_types, traits::KeyOwnerProofSystem, - weights::{RuntimeDbWeight, Weight}, + weights::RuntimeDbWeight, }; +use frame_system::limits::{BlockWeights, BlockLength}; use sp_inherents::{CheckInherentsResult, InherentData}; use cfg_if::cfg_if; @@ -66,8 +69,8 @@ pub type AuraId = sp_consensus_aura::sr25519::AuthorityId; #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] -/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. pub fn wasm_binary_unwrap() -> &'static [u8] { WASM_BINARY.expect("Development wasm binary is not available. Testing is only \ supported with the flag disabled.") @@ -197,7 +200,7 @@ impl ExtrinsicT for Extrinsic { impl sp_runtime::traits::Dispatchable for Extrinsic { type Origin = Origin; - type Trait = (); + type Config = (); type Info = (); type PostInfo = (); fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { @@ -427,17 +430,20 @@ impl From> for Event { parameter_types! { pub const BlockHashCount: BlockNumber = 2400; pub const MinimumPeriod: u64 = 5; - pub const MaximumBlockWeight: Weight = 4 * 1024 * 1024; pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 100, write: 1000, }; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub RuntimeBlockLength: BlockLength = + BlockLength::max(4 * 1024 * 1024); + pub RuntimeBlockWeights: BlockWeights = + BlockWeights::with_sensible_defaults(4 * 1024 * 1024, Perbill::from_percent(75)); } -impl frame_system::Trait for Runtime { +impl frame_system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; type Origin = Origin; type Call = Extrinsic; type Index = u64; @@ -449,13 +455,7 @@ impl frame_system::Trait for Runtime { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = (); @@ -464,7 +464,7 @@ impl frame_system::Trait for Runtime { type SystemWeightInfo = (); } -impl pallet_timestamp::Trait for Runtime { +impl pallet_timestamp::Config for Runtime { /// A timestamp: milliseconds since the unix epoch. type Moment = u64; type OnTimestampSet = (); @@ -477,7 +477,7 @@ parameter_types! 
{ pub const ExpectedBlockTime: u64 = 10_000; } -impl pallet_babe::Trait for Runtime { +impl pallet_babe::Config for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; // there is no actual runtime in this test-runtime, so testing crates diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 818487a89e518c35ed7a8942fa0e862e0c13804a..db22a6092c71488d102b6a6e6fc77247dc9e7e19 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -32,7 +32,7 @@ use sp_runtime::{ }, }; use codec::{KeyedVec, Encode, Decode}; -use frame_system::Trait; +use frame_system::Config; use crate::{ AccountId, BlockNumber, Extrinsic, Transfer, H256 as Hash, Block, Header, Digest, AuthorityId }; @@ -42,11 +42,11 @@ const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin {} } decl_storage! { - trait Store for Module as TestRuntime { + trait Store for Module as TestRuntime { ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec; // The current block number being processed. Set by `execute_block`. Number get(fn number): Option; diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml index 9efc8c396680ee87521c173527efeb134ca276bd..31fc1e37f3d44e8167b9b5f9a38f45193c0c5ec3 100644 --- a/utils/browser/Cargo.toml +++ b/utils/browser/Cargo.toml @@ -16,12 +16,12 @@ targets = ["x86_64-unknown-linux-gnu"] futures = { version = "0.3", features = ["compat"] } futures01 = { package = "futures", version = "0.1.29" } log = "0.4.8" -libp2p-wasm-ext = { version = "0.24", features = ["websocket"] } +libp2p-wasm-ext = { version = "0.25", features = ["websocket"] } console_error_panic_hook = "0.1.6" -console_log = "0.1.2" +console_log = "0.2.0" js-sys = "0.3.34" wasm-bindgen = "0.2.57" -wasm-bindgen-futures = "0.4.7" +wasm-bindgen-futures = "0.4.18" kvdb-web = "0.7" sp-database = { version = "2.0.0", path = "../../primitives/database" } sc-informant = { version = "0.8.0", path = "../../client/informant" } diff --git a/utils/browser/src/lib.rs b/utils/browser/src/lib.rs index 95ec7ca19c9a4eda4c2e930e055dae4495c4437b..071ed332fcdf41d59a82e2a55f13001c5072304e 100644 --- a/utils/browser/src/lib.rs +++ b/utils/browser/src/lib.rs @@ -75,6 +75,7 @@ where DatabaseConfig::Custom(sp_database::as_database(db)) }, + keystore_remote: Default::default(), keystore: KeystoreConfig::InMemory, default_heap_pages: Default::default(), dev_key_seed: Default::default(), @@ -105,6 +106,7 @@ where informant_output_format: sc_informant::OutputFormat { enable_color: false, }, + disable_log_reloading: false, }; Ok(config) diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index 1d01c53417649ac1d3df2802dbe43063ebf8c9e7..d2a0a4f3dd657826cfc0486b8f2038b43171ecd7 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -144,7 +144,7 @@ impl ForkTree where for child in root_children { if is_first && (child.number == *number && child.hash == *hash || - child.number < *number && is_descendent_of(&child.hash, hash).unwrap_or(false)) + child.number < *number && is_descendent_of(&child.hash, hash)?) 
{ root.children.push(child); // assuming that the tree is well formed only one child should pass this requirement @@ -229,7 +229,10 @@ impl ForkTree where number = n; data = d; }, - None => return Ok(false), + None => { + self.rebalance(); + return Ok(false); + }, } } @@ -251,7 +254,9 @@ impl ForkTree where } fn node_iter(&self) -> impl Iterator> { - ForkTreeIterator { stack: self.roots.iter().collect() } + // we need to reverse the order of roots to maintain the expected + // ordering since the iterator uses a stack to track state. + ForkTreeIterator { stack: self.roots.iter().rev().collect() } } /// Iterates the nodes in the tree in pre-order. @@ -410,15 +415,15 @@ impl ForkTree where // another fork not part of the tree). make sure to only keep roots that // are part of the finalized branch let mut changed = false; - self.roots.retain(|root| { - let retain = root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false); + let roots = std::mem::take(&mut self.roots); - if !retain { + for root in roots { + if root.number > number && is_descendent_of(hash, &root.hash)? { + self.roots.push(root); + } else { changed = true; } - - retain - }); + } self.best_finalized_number = Some(number); @@ -462,16 +467,19 @@ impl ForkTree where let (is_finalized, is_descendant, is_ancestor) = { let root = &self.roots[idx]; let is_finalized = root.hash == *hash; - let is_descendant = !is_finalized - && root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false); - let is_ancestor = !is_finalized && !is_descendant - && root.number < number && is_descendent_of(&root.hash, hash).unwrap_or(false); + let is_descendant = + !is_finalized && root.number > number && is_descendent_of(hash, &root.hash)?; + let is_ancestor = !is_finalized + && !is_descendant && root.number < number + && is_descendent_of(&root.hash, hash)?; (is_finalized, is_descendant, is_ancestor) }; // if we have met finalized root - open it and return if is_finalized { - return Ok(FinalizationResult::Changed(Some(self.finalize_root_at(idx)))); + return Ok(FinalizationResult::Changed(Some( + self.finalize_root_at(idx), + ))); } // if node is descendant of finalized block - just leave it as is @@ -605,18 +613,19 @@ impl ForkTree where // descendent (in this case the node wasn't finalized earlier presumably // because the predicate didn't pass). let mut changed = false; - self.roots.retain(|root| { - let retain = - root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false) || - root.number == number && root.hash == *hash || - is_descendent_of(&root.hash, hash).unwrap_or(false); + let roots = std::mem::take(&mut self.roots); - if !retain { + for root in roots { + let retain = root.number > number && is_descendent_of(hash, &root.hash)? + || root.number == number && root.hash == *hash + || is_descendent_of(&root.hash, hash)?; + + if retain { + self.roots.push(root); + } else { changed = true; } - - retain - }); + } self.best_finalized_number = Some(number); @@ -898,8 +907,7 @@ impl Iterator for RemovedIterator { // child nodes are stored ordered by max branch height (decreasing), // we want to keep this ordering while iterating but since we're // using a stack for iterator state we need to reverse it. 
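The hunks above drop `self.roots.retain(|root| ... is_descendent_of(...).unwrap_or(false))` in favour of taking the vector with `std::mem::take` and rebuilding it in a plain loop, so errors from `is_descendent_of` propagate with `?` instead of being silently mapped to `false`; the same `mem::take` idiom replaces the manual `mem::swap` on `node.children` just below. A minimal standalone sketch of the pattern, with illustrative types rather than the actual `ForkTree` code:

```rust
use std::mem;

struct Roots {
    roots: Vec<u32>,
}

impl Roots {
    /// Keep only the roots for which `keep` returns `Ok(true)`, propagating any error.
    /// `Vec::retain` cannot be used here because its closure is infallible.
    fn retain_fallible<E>(
        &mut self,
        mut keep: impl FnMut(&u32) -> Result<bool, E>,
    ) -> Result<bool, E> {
        let mut changed = false;
        // `mem::take` leaves an empty Vec behind, so `self.roots` is still valid
        // (if only partially rebuilt) when `?` returns early.
        for root in mem::take(&mut self.roots) {
            if keep(&root)? {
                self.roots.push(root);
            } else {
                changed = true;
            }
        }
        Ok(changed)
    }
}
```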
- let mut children = Vec::new(); - std::mem::swap(&mut children, &mut node.children); + let children = std::mem::take(&mut node.children); self.stack.extend(children.into_iter().rev()); (node.hash, node.number, node.data) @@ -939,6 +947,10 @@ mod test { // — J - K // // (where N is not a part of fork tree) + // + // NOTE: the tree will get automatically rebalance on import and won't be laid out like the + // diagram above. the children will be ordered by subtree depth and the longest branches + // will be on the leftmost side of the tree. let is_descendent_of = |base: &&str, block: &&str| -> Result { let letters = vec!["B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "O"]; match (*base, *block) { @@ -1132,7 +1144,7 @@ mod test { assert_eq!( tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("I", 4), ("L", 4)], + vec![("L", 4), ("I", 4)], ); // finalizing a node from another fork that isn't part of the tree clears the tree @@ -1180,7 +1192,7 @@ mod test { assert_eq!( tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("I", 4), ("L", 4)], + vec![("L", 4), ("I", 4)], ); assert_eq!( @@ -1354,11 +1366,11 @@ mod test { vec![ ("A", 1), ("B", 2), ("C", 3), ("D", 4), ("E", 5), - ("F", 2), + ("F", 2), ("H", 3), ("L", 4), ("M", 5), + ("O", 5), + ("I", 4), ("G", 3), - ("H", 3), ("I", 4), - ("L", 4), ("M", 5), ("O", 5), - ("J", 2), ("K", 3) + ("J", 2), ("K", 3), ], ); } @@ -1480,7 +1492,7 @@ mod test { assert_eq!( removed.map(|(hash, _, _)| hash).collect::>(), - vec!["A", "F", "G", "H", "I", "L", "M", "O", "J", "K"] + vec!["A", "F", "H", "L", "M", "O", "I", "G", "J", "K"] ); let removed = tree.prune( @@ -1545,19 +1557,30 @@ mod test { fn tree_rebalance() { let (mut tree, _) = test_fork_tree(); + // the tree is automatically rebalanced on import, therefore we should iterate in preorder + // exploring the longest forks first. check the ascii art above to understand the expected + // output below. assert_eq!( tree.iter().map(|(h, _, _)| *h).collect::>(), - vec!["A", "B", "C", "D", "E", "F", "G", "H", "I", "L", "M", "O", "J", "K"], + vec!["A", "B", "C", "D", "E", "F", "H", "L", "M", "O", "I", "G", "J", "K"], ); - // after rebalancing the tree we should iterate in preorder exploring - // the longest forks first. check the ascii art above to understand the - // expected output below. - tree.rebalance(); + // let's add a block "P" which is a descendent of block "O" + let is_descendent_of = |base: &&str, block: &&str| -> Result { + match (*base, *block) { + (b, "P") => Ok(vec!["A", "F", "L", "O"].into_iter().any(|n| n == b)), + _ => Ok(false), + } + }; + + tree.import("P", 6, (), &is_descendent_of).unwrap(); + // this should re-order the tree, since the branch "A -> B -> C -> D -> E" is no longer tied + // with 5 blocks depth. additionally "O" should be visited before "M" now, since it has one + // descendent "P" which makes that branch 6 blocks long. 
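The comments added to the tests above spell out the rebalancing invariant: a node's children are kept ordered by the maximum depth of their subtrees, decreasing, so a pre-order walk visits the longest fork first. A toy sketch of that ordering rule (illustrative only, not the actual `ForkTree::rebalance`):

```rust
use std::cmp::Reverse;

struct Node {
    hash: &'static str,
    children: Vec<Node>,
}

impl Node {
    /// Length of the longest path from this node down to a leaf, counting this node.
    fn max_depth(&self) -> usize {
        1 + self.children.iter().map(Node::max_depth).max().unwrap_or(0)
    }

    /// Sort children so the deepest subtree comes first, recursively.
    fn rebalance(&mut self) {
        self.children.sort_by_key(|child| Reverse(child.max_depth()));
        for child in &mut self.children {
            child.rebalance();
        }
    }
}
```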
assert_eq!( tree.iter().map(|(h, _, _)| *h).collect::>(), - ["A", "B", "C", "D", "E", "F", "H", "L", "M", "O", "I", "G", "J", "K"] + ["A", "F", "H", "L", "O", "P", "M", "I", "G", "B", "C", "D", "E", "J", "K"] ); } } diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 4ee2454e708e9a471c70a2c25a6be807100e28b3..83f93799691d341d4b29713918ba248f1b4e78dc 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -23,11 +23,12 @@ sp-externalities = { version = "0.8.0", path = "../../../primitives/externalitie sp-keystore = { version = "0.8.0", path = "../../../primitives/keystore" } sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } -structopt = "0.3.8" codec = { version = "1.3.1", package = "parity-scale-codec" } +structopt = "0.3.8" chrono = "0.4" serde = "1.0.116" handlebars = "3.5.0" +Inflector = "0.11.4" [features] default = ["db"] diff --git a/utils/frame/benchmarking-cli/src/template.hbs b/utils/frame/benchmarking-cli/src/template.hbs index fd066b1a3a8ae034f5ce65684bfea30c0bc87ad8..0ff6144214d61d82878a76d2eb9e387ab1d14342 100644 --- a/utils/frame/benchmarking-cli/src/template.hbs +++ b/utils/frame/benchmarking-cli/src/template.hbs @@ -18,7 +18,7 @@ use sp_std::marker::PhantomData; /// Weight functions for {{pallet}}. pub struct WeightInfo(PhantomData); -impl {{pallet}}::WeightInfo for WeightInfo { +impl {{pallet}}::WeightInfo for WeightInfo { {{~#each benchmarks as |benchmark|}} fn {{benchmark.name~}} ( @@ -27,6 +27,7 @@ impl {{pallet}}::WeightInfo for WeightInfo { ) -> Weight { ({{underscore benchmark.base_weight}} as Weight) {{~#each benchmark.component_weight as |cw|}} + // Standard Error: {{underscore cw.error}} .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) {{~/each}} {{~#if (ne benchmark.base_reads "0")}} diff --git a/utils/frame/benchmarking-cli/src/writer.rs b/utils/frame/benchmarking-cli/src/writer.rs index 61423000231d0f13800a61ca7a7672c335ba3884..4afc8107306789d3c3544c41fa8824618e4e8f90 100644 --- a/utils/frame/benchmarking-cli/src/writer.rs +++ b/utils/frame/benchmarking-cli/src/writer.rs @@ -22,9 +22,10 @@ use std::fs; use std::path::PathBuf; use serde::Serialize; +use inflector::Inflector; use crate::BenchmarkCmd; -use frame_benchmarking::{BenchmarkBatch, BenchmarkSelector, Analysis}; +use frame_benchmarking::{BenchmarkBatch, BenchmarkSelector, Analysis, RegressionModel}; use sp_runtime::traits::Zero; const VERSION: &'static str = env!("CARGO_PKG_VERSION"); @@ -37,6 +38,7 @@ struct TemplateData { date: String, version: String, pallet: String, + instance: String, header: String, cmd: CmdData, benchmarks: Vec, @@ -84,6 +86,8 @@ struct ComponentSlope { name: String, #[serde(serialize_with = "string_serialize")] slope: u128, + #[serde(serialize_with = "string_serialize")] + error: u128, } // Small helper to create an `io::Error` from a string. @@ -100,7 +104,7 @@ fn io_error(s: &str) -> std::io::Error { // p1 -> [b1, b2, b3] // p2 -> [b1, b2] // ``` -fn map_results(batches: &[BenchmarkBatch]) -> Result>, std::io::Error> { +fn map_results(batches: &[BenchmarkBatch]) -> Result>, std::io::Error> { // Skip if batches is empty. 
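Both weight templates now emit a `// Standard Error: ...` line per component, and `ComponentSlope` (just below) gains an `error` field filled from the regression model's per-regressor standard errors, falling back to zero when no model was fitted. A standalone sketch of how slopes, names and errors end up paired (names and signatures here are illustrative, not the writer's real API):

```rust
/// Pair each non-zero slope with its component name and standard error.
fn collect_slopes(
    slopes: Vec<u128>,
    names: Vec<String>,
    errors: impl Iterator<Item = u128>,
) -> Vec<(String, u128, u128)> {
    slopes
        .into_iter()
        .zip(names)
        .zip(errors)
        // Components with a zero slope do not show up in the weight formula.
        .filter(|((slope, _), _)| *slope != 0)
        .map(|((slope, name), error)| (name, slope, error))
        .collect()
}
```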
if batches.is_empty() { return Err(io_error("empty batches")) } @@ -113,6 +117,7 @@ fn map_results(batches: &[BenchmarkBatch]) -> Result Result) -> impl Iterator + '_ { + let mut errors = model.as_ref().map(|m| m.se.regressor_values.iter()); + std::iter::from_fn(move || { + match &mut errors { + Some(model) => model.next().map(|val| *val as u128), + _ => Some(0), + } + }) +} + // Analyze and return the relevant results for a given benchmark. fn get_benchmark_data(batch: &BenchmarkBatch) -> BenchmarkData { // Analyze benchmarks to get the linear regression. @@ -145,27 +162,45 @@ fn get_benchmark_data(batch: &BenchmarkBatch) -> BenchmarkData { let mut used_reads = Vec::new(); let mut used_writes = Vec::new(); - extrinsic_time.slopes.into_iter().zip(extrinsic_time.names.iter()).for_each(|(slope, name)| { - if !slope.is_zero() { - if !used_components.contains(&name) { used_components.push(name); } - used_extrinsic_time.push(ComponentSlope { - name: name.clone(), - slope: slope.saturating_mul(1000), - }); - } - }); - reads.slopes.into_iter().zip(reads.names.iter()).for_each(|(slope, name)| { - if !slope.is_zero() { - if !used_components.contains(&name) { used_components.push(name); } - used_reads.push(ComponentSlope { name: name.clone(), slope }); - } - }); - writes.slopes.into_iter().zip(writes.names.iter()).for_each(|(slope, name)| { - if !slope.is_zero() { - if !used_components.contains(&name) { used_components.push(name); } - used_writes.push(ComponentSlope { name: name.clone(), slope }); - } - }); + extrinsic_time.slopes.into_iter() + .zip(extrinsic_time.names.iter()) + .zip(extract_errors(&extrinsic_time.model)) + .for_each(|((slope, name), error)| { + if !slope.is_zero() { + if !used_components.contains(&name) { used_components.push(name); } + used_extrinsic_time.push(ComponentSlope { + name: name.clone(), + slope: slope.saturating_mul(1000), + error: error.saturating_mul(1000), + }); + } + }); + reads.slopes.into_iter() + .zip(reads.names.iter()) + .zip(extract_errors(&reads.model)) + .for_each(|((slope, name), error)| { + if !slope.is_zero() { + if !used_components.contains(&name) { used_components.push(name); } + used_reads.push(ComponentSlope { + name: name.clone(), + slope, + error, + }); + } + }); + writes.slopes.into_iter() + .zip(writes.names.iter()) + .zip(extract_errors(&writes.model)) + .for_each(|((slope, name), error)| { + if !slope.is_zero() { + if !used_components.contains(&name) { used_components.push(name); } + used_writes.push(ComponentSlope { + name: name.clone(), + slope, + error, + }); + } + }); // This puts a marker on any component which is entirely unused in the weight formula. let components = batch.results[0].components @@ -241,12 +276,18 @@ pub fn write_results( // Organize results by pallet into a JSON map let all_results = map_results(batches)?; - for (pallet, results) in all_results.into_iter() { + for ((pallet, instance), results) in all_results.iter() { let mut file_path = path.clone(); // If a user only specified a directory... if file_path.is_dir() { - // Create new file: "path/to/pallet_name.rs". - file_path.push(&pallet); + // Check if there might be multiple instances benchmarked. + if all_results.keys().any(|(p, i)| p == pallet && i != instance) { + // Create new file: "path/to/pallet_name_instance_name.rs". + file_path.push(pallet.clone() + "_" + &instance.to_snake_case()); + } else { + // Create new file: "path/to/pallet_name.rs". 
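The loop above now iterates a map keyed by `(pallet, instance)` and, when the same pallet was benchmarked under more than one instance, appends the snake-cased instance name to the output file. A standalone sketch of that naming rule, with `Vec<()>` standing in for the per-benchmark data:

```rust
use std::collections::HashMap;
use inflector::Inflector; // the `Inflector` crate added to Cargo.toml above

fn output_file_name(
    all_results: &HashMap<(String, String), Vec<()>>,
    pallet: &str,
    instance: &str,
) -> String {
    // Is this pallet present under any other instance name?
    let multi_instance = all_results
        .keys()
        .any(|(p, i)| p == pallet && i != instance);
    if multi_instance {
        format!("{}_{}.rs", pallet, instance.to_snake_case())
    } else {
        format!("{}.rs", pallet)
    }
}
```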
+ file_path.push(pallet.clone()); + } file_path.set_extension("rs"); } @@ -254,10 +295,11 @@ pub fn write_results( args: args.clone(), date: date.clone(), version: VERSION.to_string(), - pallet: pallet, + pallet: pallet.to_string(), + instance: instance.to_string(), header: header_text.clone(), cmd: cmd_data.clone(), - benchmarks: results, + benchmarks: results.clone(), }; let mut output_file = fs::File::create(file_path)?; @@ -362,6 +404,7 @@ mod test { return BenchmarkBatch { pallet: [pallet.to_vec(), b"_pallet".to_vec()].concat(), + instance: b"instance".to_vec(), benchmark: [benchmark.to_vec(), b"_benchmark".to_vec()].concat(), results, } @@ -379,18 +422,30 @@ mod test { assert_eq!(benchmark.base_weight, base * 1_000); assert_eq!( benchmark.component_weight, - vec![ComponentSlope { name: component.to_string(), slope: slope * 1_000 }] + vec![ComponentSlope { + name: component.to_string(), + slope: slope * 1_000, + error: 0, + }] ); // DB Reads/Writes are untouched assert_eq!(benchmark.base_reads, base); assert_eq!( benchmark.component_reads, - vec![ComponentSlope { name: component.to_string(), slope: slope }] + vec![ComponentSlope { + name: component.to_string(), + slope, + error: 0, + }] ); assert_eq!(benchmark.base_writes, base); assert_eq!( benchmark.component_writes, - vec![ComponentSlope { name: component.to_string(), slope: slope }] + vec![ComponentSlope { + name: component.to_string(), + slope, + error: 0, + }] ); } @@ -402,15 +457,21 @@ mod test { test_data(b"second", b"first", BenchmarkParameter::c, 3, 4), ]).unwrap(); - let first_benchmark = &mapped_results.get("first_pallet").unwrap()[0]; + let first_benchmark = &mapped_results.get( + &("first_pallet".to_string(), "instance".to_string()) + ).unwrap()[0]; assert_eq!(first_benchmark.name, "first_benchmark"); check_data(first_benchmark, "a", 10, 3); - let second_benchmark = &mapped_results.get("first_pallet").unwrap()[1]; + let second_benchmark = &mapped_results.get( + &("first_pallet".to_string(), "instance".to_string()) + ).unwrap()[1]; assert_eq!(second_benchmark.name, "second_benchmark"); check_data(second_benchmark, "b", 9, 2); - let second_pallet_benchmark = &mapped_results.get("second_pallet").unwrap()[0]; + let second_pallet_benchmark = &mapped_results.get( + &("second_pallet".to_string(), "instance".to_string()) + ).unwrap()[0]; assert_eq!(second_pallet_benchmark.name, "first_benchmark"); check_data(second_pallet_benchmark, "c", 3, 4); } diff --git a/utils/frame/frame-utilities-cli/src/module_id.rs b/utils/frame/frame-utilities-cli/src/module_id.rs index cc76c70d0fa8e511c889377840202c403da26106..ae26f31ad24f347cbd835743c232461111148ba4 100644 --- a/utils/frame/frame-utilities-cli/src/module_id.rs +++ b/utils/frame/frame-utilities-cli/src/module_id.rs @@ -64,7 +64,7 @@ impl ModuleIdCmd { /// runs the command pub fn run(&self) -> Result<(), Error> where - R: frame_system::Trait, + R: frame_system::Config, R::AccountId: Ss58Codec, { if self.id.len() != 8 { diff --git a/utils/frame/rpc/support/src/lib.rs b/utils/frame/rpc/support/src/lib.rs index dc87d6185209deba1c02a4f0f8abb3973f361199..85cb433cb2b32c2ee86b63f3e5e5d5e457a8b5c4 100644 --- a/utils/frame/rpc/support/src/lib.rs +++ b/utils/frame/rpc/support/src/lib.rs @@ -40,11 +40,11 @@ use sc_rpc_api::state::StateClient; /// # use codec::Encode; /// # use frame_support::{decl_storage, decl_module}; /// # use substrate_frame_rpc_support::StorageQuery; -/// # use frame_system::Trait; +/// # use frame_system::Config; /// # use sc_rpc_api::state::StateClient; /// # -/// # // 
Hash would normally be ::Hash, but we don't have -/// # // frame_system::Trait implemented for TestRuntime. Here we just pretend. +/// # // Hash would normally be ::Hash, but we don't have +/// # // frame_system::Config implemented for TestRuntime. Here we just pretend. /// # type Hash = (); /// # /// # fn main() -> Result<(), RpcError> { @@ -54,7 +54,7 @@ use sc_rpc_api::state::StateClient; /// # struct TestRuntime; /// # /// # decl_module! { -/// # pub struct Module for enum Call where origin: T::Origin {} +/// # pub struct Module for enum Call where origin: T::Origin {} /// # } /// # /// pub type Loc = (i64, i64, i64); @@ -62,7 +62,7 @@ use sc_rpc_api::state::StateClient; /// /// // Note that all fields are marked pub. /// decl_storage! { -/// trait Store for Module as TestRuntime { +/// trait Store for Module as TestRuntime { /// pub LastActionId: u64; /// pub Voxels: map hasher(blake2_128_concat) Loc => Block; /// pub Actions: map hasher(blake2_128_concat) u64 => Loc; @@ -125,7 +125,7 @@ impl StorageQuery { /// Send this query over RPC, await the typed result. /// - /// Hash should be ::Hash. + /// Hash should be ::Hash. /// /// # Arguments /// diff --git a/utils/prometheus/Cargo.toml b/utils/prometheus/Cargo.toml index 9eed7a2fdcfcd07364f610f2260069aca4b5d40b..335f84bf0f2678c85098aa4d6cd9fc1b9e6bdbd3 100644 --- a/utils/prometheus/Cargo.toml +++ b/utils/prometheus/Cargo.toml @@ -20,5 +20,5 @@ derive_more = "0.99" [target.'cfg(not(target_os = "unknown"))'.dependencies] async-std = { version = "1.6.5", features = ["unstable"] } -hyper = { version = "0.13.1", default-features = false, features = ["stream"] } +hyper = { version = "0.13.9", default-features = false, features = ["stream"] } tokio = "0.2" diff --git a/utils/prometheus/src/lib.rs b/utils/prometheus/src/lib.rs index be7050a8a07369ccf83db3a572bcec77e8a67521..097073239c411c071c6699c31277a244e9e65b3f 100644 --- a/utils/prometheus/src/lib.rs +++ b/utils/prometheus/src/lib.rs @@ -33,7 +33,7 @@ use std::net::SocketAddr; mod networking; mod sourced; -pub use sourced::{SourcedCounter, SourcedGauge, MetricSource}; +pub use sourced::{SourcedCounter, SourcedGauge, MetricSource, SourcedMetric}; #[cfg(target_os = "unknown")] pub use unknown_os::init_prometheus; diff --git a/utils/wasm-builder-runner/Cargo.toml b/utils/wasm-builder-runner/Cargo.toml deleted file mode 100644 index 2c54a5ec3a4d6940aeb4b6c80cec2b5ff5c10ed2..0000000000000000000000000000000000000000 --- a/utils/wasm-builder-runner/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "substrate-wasm-builder-runner" -version = "2.0.0" -authors = ["Parity Technologies "] -description = "Runner for substrate-wasm-builder" -edition = "2018" -readme = "README.md" -repository = "https://github.com/paritytech/substrate/" -license = "Apache-2.0" -homepage = "https://substrate.dev" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] diff --git a/utils/wasm-builder-runner/README.md b/utils/wasm-builder-runner/README.md deleted file mode 100644 index 1b9e2b08ca44405cf307c559b29b743eb8039b10..0000000000000000000000000000000000000000 --- a/utils/wasm-builder-runner/README.md +++ /dev/null @@ -1,12 +0,0 @@ -## WASM builder runner - -Since cargo contains many bugs when it comes to correct dependency and feature -resolution, we need this little tool. See for -more information. - -It will create a project that will call `substrate-wasm-builder` to prevent any dependencies -from `substrate-wasm-builder` influencing the main project's dependencies. 
- -For more information see - -License: GPL-3.0 diff --git a/utils/wasm-builder-runner/src/lib.rs b/utils/wasm-builder-runner/src/lib.rs deleted file mode 100644 index 04e06495c69b43635f747dd889c27c61fa02c272..0000000000000000000000000000000000000000 --- a/utils/wasm-builder-runner/src/lib.rs +++ /dev/null @@ -1,498 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # WASM builder runner -//! -//! Since cargo contains many bugs when it comes to correct dependency and feature -//! resolution, we need this little tool. See for -//! more information. -//! -//! It will create a project that will call `substrate-wasm-builder` to prevent any dependencies -//! from `substrate-wasm-builder` influencing the main project's dependencies. -//! -//! For more information see - -use std::{ - env, process::{Command, self}, fs, path::{PathBuf, Path}, hash::{Hash, Hasher}, - collections::hash_map::DefaultHasher, -}; - -/// Environment variable that tells us to skip building the WASM binary. -const SKIP_BUILD_ENV: &str = "SKIP_WASM_BUILD"; - -/// Environment variable that tells us to create a dummy WASM binary. -/// -/// This is useful for `cargo check` to speed-up the compilation. -/// -/// # Caution -/// -/// Enabling this option will just provide `&[]` as WASM binary. -const DUMMY_WASM_BINARY_ENV: &str = "BUILD_DUMMY_WASM_BINARY"; - -/// Environment variable that makes sure the WASM build is triggered. -const FORCE_WASM_BUILD_ENV: &str = "FORCE_WASM_BUILD"; - -/// Replace all backslashes with slashes. -fn replace_back_slashes(path: T) -> String { - path.to_string().replace("\\", "/") -} - -/// Returns the manifest dir from the `CARGO_MANIFEST_DIR` env. -fn get_manifest_dir() -> PathBuf { - env::var("CARGO_MANIFEST_DIR") - .expect("`CARGO_MANIFEST_DIR` is always set for `build.rs` files; qed") - .into() -} - -/// First step of the [`WasmBuilder`] to select the project to build. -pub struct WasmBuilderSelectProject { - /// This parameter just exists to make it impossible to construct - /// this type outside of this crate. - _ignore: (), -} - -impl WasmBuilderSelectProject { - /// Use the current project as project for building the WASM binary. - /// - /// # Panics - /// - /// Panics if the `CARGO_MANIFEST_DIR` variable is not set. This variable - /// is always set by `Cargo` in `build.rs` files. - pub fn with_current_project(self) -> WasmBuilderSelectSource { - WasmBuilderSelectSource(get_manifest_dir().join("Cargo.toml")) - } - - /// Use the given `path` as project for building the WASM binary. - /// - /// Returns an error if the given `path` does not points to a `Cargo.toml`. 
- pub fn with_project( - self, - path: impl Into, - ) -> Result { - let path = path.into(); - - if path.ends_with("Cargo.toml") { - Ok(WasmBuilderSelectSource(path)) - } else { - Err("Project path must point to the `Cargo.toml` of the project") - } - } -} - -/// Second step of the [`WasmBuilder`] to set the source of the `wasm-builder`. -pub struct WasmBuilderSelectSource(PathBuf); - -impl WasmBuilderSelectSource { - /// Use the given `path` as source for `wasm-builder`. - /// - /// The `path` must be relative and point to the directory that contains the `Cargo.toml` for - /// `wasm-builder`. - pub fn with_wasm_builder_from_path(self, path: &'static str) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::Path(path), - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `repo` and `rev` as source for `wasm-builder`. - pub fn with_wasm_builder_from_git(self, repo: &'static str, rev: &'static str) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::Git { repo, rev }, - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `version` to fetch `wasm-builder` source from crates.io. - pub fn with_wasm_builder_from_crates(self, version: &'static str) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::Crates(version), - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `version` to fetch `wasm-builder` source from crates.io or use - /// the given `path` as source. - /// - /// The `path` must be relative and point to the directory that contains the `Cargo.toml` for - /// `wasm-builder`. - pub fn with_wasm_builder_from_crates_or_path( - self, - version: &'static str, - path: &'static str, - ) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::CratesOrPath { version, path }, - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `source` as source for `wasm-builder`. - pub fn with_wasm_builder_source(self, source: WasmBuilderSource) -> WasmBuilder { - WasmBuilder { - source, - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } -} - -/// The builder for building a wasm binary. -/// -/// The builder itself is seperated into multiple structs to make the setup type safe. -/// -/// Building a wasm binary: -/// -/// 1. Call [`WasmBuilder::new`] to create a new builder. -/// 2. Select the project to build using the methods of [`WasmBuilderSelectProject`]. -/// 3. Select the source of the `wasm-builder` crate using the methods of -/// [`WasmBuilderSelectSource`]. -/// 4. Set additional `RUST_FLAGS` or a different name for the file containing the WASM code -/// using methods of [`WasmBuilder`]. -/// 5. Build the WASM binary using [`Self::build`]. -pub struct WasmBuilder { - /// Where should we pull the `wasm-builder` crate from. - source: WasmBuilderSource, - /// Flags that should be appended to `RUST_FLAGS` env variable. - rust_flags: Vec, - /// The name of the file that is being generated in `OUT_DIR`. - /// - /// Defaults to `wasm_binary.rs`. - file_name: Option, - /// The path to the `Cargo.toml` of the project that should be build - /// for wasm. - project_cargo_toml: PathBuf, -} - -impl WasmBuilder { - /// Create a new instance of the builder. - pub fn new() -> WasmBuilderSelectProject { - WasmBuilderSelectProject { - _ignore: (), - } - } - - /// Enable exporting `__heap_base` as global variable in the WASM binary. 
- /// - /// This adds `-Clink-arg=--export=__heap_base` to `RUST_FLAGS`. - pub fn export_heap_base(mut self) -> Self { - self.rust_flags.push("-Clink-arg=--export=__heap_base".into()); - self - } - - /// Set the name of the file that will be generated in `OUT_DIR`. - /// - /// This file needs to be included to get access to the build WASM binary. - /// - /// If this function is not called, `file_name` defaults to `wasm_binary.rs` - pub fn set_file_name(mut self, file_name: impl Into) -> Self { - self.file_name = Some(file_name.into()); - self - } - - /// Instruct the linker to import the memory into the WASM binary. - /// - /// This adds `-C link-arg=--import-memory` to `RUST_FLAGS`. - pub fn import_memory(mut self) -> Self { - self.rust_flags.push("-C link-arg=--import-memory".into()); - self - } - - /// Append the given `flag` to `RUST_FLAGS`. - /// - /// `flag` is appended as is, so it needs to be a valid flag. - pub fn append_to_rust_flags(mut self, flag: impl Into) -> Self { - self.rust_flags.push(flag.into()); - self - } - - /// Build the WASM binary. - pub fn build(self) { - let out_dir = PathBuf::from(env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!")); - let file_path = out_dir.join(self.file_name.unwrap_or_else(|| "wasm_binary.rs".into())); - - if check_skip_build() { - // If we skip the build, we still want to make sure to be called when an env variable - // changes - generate_rerun_if_changed_instructions(); - - provide_dummy_wasm_binary(&file_path, true); - - return; - } - - // Hash the path to the project cargo toml. - let mut hasher = DefaultHasher::new(); - self.project_cargo_toml.hash(&mut hasher); - - let project_name = env::var("CARGO_PKG_NAME").expect("`CARGO_PKG_NAME` is set by cargo!"); - // Make sure the `wasm-builder-runner` path is unique by concatenating the name of the - // project that is compiling the WASM binary with the hash of the path to the project that - // should be compiled as WASM binary. - let project_folder = get_workspace_root() - .join(format!("{}{}", project_name, hasher.finish())); - - if check_provide_dummy_wasm_binary() { - provide_dummy_wasm_binary(&file_path, false); - } else { - create_project( - &project_folder, - &file_path, - self.source, - &self.project_cargo_toml, - &self.rust_flags.into_iter().map(|f| format!("{} ", f)).collect::(), - ); - run_project(&project_folder); - } - - // As last step we need to generate our `rerun-if-changed` stuff. If a build fails, we don't - // want to spam the output! - generate_rerun_if_changed_instructions(); - } -} - -/// The `wasm-builder` dependency source. -pub enum WasmBuilderSource { - /// The relative path to the source code from the current manifest dir. - Path(&'static str), - /// The git repository that contains the source code. - Git { - repo: &'static str, - rev: &'static str, - }, - /// Use the given version released on crates.io. - Crates(&'static str), - /// Use the given version released on crates.io or from the given path. - CratesOrPath { - version: &'static str, - path: &'static str, - } -} - -impl WasmBuilderSource { - /// Convert to a valid cargo source declaration. - /// - /// `absolute_path` - The manifest dir. 
- fn to_cargo_source(&self, manifest_dir: &Path) -> String { - match self { - WasmBuilderSource::Path(path) => { - replace_back_slashes(format!("path = \"{}\"", manifest_dir.join(path).display())) - } - WasmBuilderSource::Git { repo, rev } => { - format!("git = \"{}\", rev=\"{}\"", repo, rev) - } - WasmBuilderSource::Crates(version) => { - format!("version = \"{}\"", version) - } - WasmBuilderSource::CratesOrPath { version, path } => { - replace_back_slashes( - format!( - "path = \"{}\", version = \"{}\"", - manifest_dir.join(path).display(), - version - ) - ) - } - } - } -} - -/// Build the currently built project as WASM binary and extend `RUSTFLAGS` with the given rustflags. -/// -/// For more information, see [`build_current_project`]. -#[deprecated( - since = "1.0.5", - note = "Please switch to [`WasmBuilder`]", -)] -pub fn build_current_project_with_rustflags( - file_name: &str, - wasm_builder_source: WasmBuilderSource, - default_rust_flags: &str, -) { - WasmBuilder::new() - .with_current_project() - .with_wasm_builder_source(wasm_builder_source) - .append_to_rust_flags(default_rust_flags) - .set_file_name(file_name) - .build() -} - -/// Build the currently built project as WASM binary. -/// -/// The current project is determined using the `CARGO_MANIFEST_DIR` environment variable. -/// -/// `file_name` - The name of the file being generated in the `OUT_DIR`. The file contains the -/// constant `WASM_BINARY` which contains the build wasm binary. -/// `wasm_builder_path` - Path to the wasm-builder project, relative to `CARGO_MANIFEST_DIR`. -#[deprecated( - since = "1.0.5", - note = "Please switch to [`WasmBuilder`]", -)] -pub fn build_current_project(file_name: &str, wasm_builder_source: WasmBuilderSource) { - #[allow(deprecated)] - build_current_project_with_rustflags(file_name, wasm_builder_source, ""); -} - -/// Returns the root path of the wasm-builder workspace. -/// -/// The wasm-builder workspace contains all wasm-builder's projects. -fn get_workspace_root() -> PathBuf { - let out_dir_env = env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!"); - let mut out_dir = PathBuf::from(&out_dir_env); - - loop { - match out_dir.parent() { - Some(parent) if out_dir.ends_with("build") => return parent.join("wbuild-runner"), - _ => if !out_dir.pop() { - break; - } - } - } - - panic!("Could not find target dir in: {}", out_dir_env) -} - -fn create_project( - project_folder: &Path, - file_path: &Path, - wasm_builder_source: WasmBuilderSource, - cargo_toml_path: &Path, - default_rustflags: &str, -) { - fs::create_dir_all(project_folder.join("src")) - .expect("WASM build runner dir create can not fail; qed"); - - write_file_if_changed( - project_folder.join("Cargo.toml"), - format!( - r#" - [package] - name = "wasm-build-runner-impl" - version = "1.0.0" - edition = "2018" - - [dependencies] - substrate-wasm-builder = {{ {wasm_builder_source} }} - - [workspace] - "#, - wasm_builder_source = wasm_builder_source.to_cargo_source(&get_manifest_dir()), - ), - ); - - write_file_if_changed( - project_folder.join("src/main.rs"), - format!( - r#" - //! This is automatically generated code by `substrate-wasm-builder`. 
- - use substrate_wasm_builder::build_project_with_default_rustflags; - - fn main() {{ - build_project_with_default_rustflags( - "{file_path}", - "{cargo_toml_path}", - "{default_rustflags}", - ) - }} - "#, - file_path = replace_back_slashes(file_path.display()), - cargo_toml_path = replace_back_slashes(cargo_toml_path.display()), - default_rustflags = default_rustflags, - ), - ); -} - -fn run_project(project_folder: &Path) { - let cargo = env::var("CARGO").expect("`CARGO` env variable is always set when executing `build.rs`."); - let mut cmd = Command::new(cargo); - cmd.arg("run").arg(format!("--manifest-path={}", project_folder.join("Cargo.toml").display())); - - if env::var("DEBUG") != Ok(String::from("true")) { - cmd.arg("--release"); - } - - // Make sure we always run the `wasm-builder` project for the `HOST` architecture. - let host_triple = env::var("HOST").expect("`HOST` is always set when executing `build.rs`."); - cmd.arg(&format!("--target={}", host_triple)); - - // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock (cargo locks a target dir exclusive). - // The runner project is created in `CARGO_TARGET_DIR` and executing it will create a sub target - // directory inside of `CARGO_TARGET_DIR`. - cmd.env_remove("CARGO_TARGET_DIR"); - - if !cmd.status().map(|s| s.success()).unwrap_or(false) { - // Don't spam the output with backtraces when a build failed! - process::exit(1); - } -} - -/// Generate the name of the skip build environment variable for the current crate. -fn generate_crate_skip_build_env_name() -> String { - format!( - "SKIP_{}_WASM_BUILD", - env::var("CARGO_PKG_NAME").expect("Package name is set").to_uppercase().replace('-', "_"), - ) -} - -/// Checks if the build of the WASM binary should be skipped. -fn check_skip_build() -> bool { - env::var(SKIP_BUILD_ENV).is_ok() || env::var(generate_crate_skip_build_env_name()).is_ok() -} - -/// Check if we should provide a dummy WASM binary. -fn check_provide_dummy_wasm_binary() -> bool { - env::var(DUMMY_WASM_BINARY_ENV).is_ok() -} - -/// Provide the dummy WASM binary -/// -/// If `skip_build` is `true`, it will only generate the wasm binary if it doesn't exist. -fn provide_dummy_wasm_binary(file_path: &Path, skip_build: bool) { - if !skip_build || !file_path.exists() { - write_file_if_changed( - file_path.into(), - "pub const WASM_BINARY: Option<&[u8]> = None;\ - pub const WASM_BINARY_BLOATY: Option<&[u8]> = None;".into(), - ); - } -} - -/// Generate the `rerun-if-changed` instructions for cargo to make sure that the WASM binary is -/// rebuilt when needed. -fn generate_rerun_if_changed_instructions() { - // Make sure that the `build.rs` is called again if one of the following env variables changes. - println!("cargo:rerun-if-env-changed={}", SKIP_BUILD_ENV); - println!("cargo:rerun-if-env-changed={}", DUMMY_WASM_BINARY_ENV); - println!("cargo:rerun-if-env-changed={}", FORCE_WASM_BUILD_ENV); - println!("cargo:rerun-if-env-changed={}", generate_crate_skip_build_env_name()); -} - -/// Write to the given `file` if the `content` is different. 
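Both the runner being deleted here and `substrate-wasm-builder` itself keep a small write-only-if-changed helper: leaving an unchanged generated file untouched avoids bumping its mtime and re-triggering builds of the crate that `include!`s it. A sketch of that helper (the deleted copy below takes `PathBuf`/`String`; the `impl AsRef<...>` bounds are an assumption about the retained variant):

```rust
use std::{fs, path::Path};

/// Write `content` to `file` only if it differs from what is already on disk.
fn write_file_if_changed(file: impl AsRef<Path>, content: impl AsRef<str>) {
    let file = file.as_ref();
    let content = content.as_ref();
    if fs::read_to_string(file).ok().as_deref() != Some(content) {
        fs::write(file, content)
            .unwrap_or_else(|_| panic!("Writing `{}` can not fail!", file.display()));
    }
}
```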
-fn write_file_if_changed(file: PathBuf, content: String) { - if fs::read_to_string(&file).ok().as_ref() != Some(&content) { - fs::write(&file, content).unwrap_or_else(|_| panic!("Writing `{}` can not fail!", file.display())); - } -} diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml index e9dd1a97b89e422517e7e2103bad51004aaa7e2f..199e26b509e2e509c84973d3906c7da2ee4cdedf 100644 --- a/utils/wasm-builder/Cargo.toml +++ b/utils/wasm-builder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-wasm-builder" -version = "2.0.1" +version = "3.0.0" authors = ["Parity Technologies "] description = "Utility for building WASM binaries" edition = "2018" @@ -14,12 +14,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] build-helper = "0.1.1" -cargo_metadata = "0.10.0" +cargo_metadata = "0.12.0" tempfile = "3.1.0" toml = "0.5.4" walkdir = "2.3.1" -fs2 = "0.4.3" wasm-gc-api = "0.1.11" atty = "0.2.13" -itertools = "0.8.2" ansi_term = "0.12.1" diff --git a/utils/wasm-builder/README.md b/utils/wasm-builder/README.md index 1e24d2cebab3220b68d281a33a495a01999f0130..3868faf1acab5a1e1c4f50d4671073f658d82cf7 100644 --- a/utils/wasm-builder/README.md +++ b/utils/wasm-builder/README.md @@ -8,20 +8,23 @@ The Wasm builder is a tool that integrates the process of building the WASM bina A project that should be compiled as a Wasm binary needs to: 1. Add a `build.rs` file. -2. Add `substrate-wasm-builder` as dependency into `build-dependencies`. +2. Add `wasm-builder` as dependency into `build-dependencies`. The `build.rs` file needs to contain the following code: ```rust -use wasm_builder_runner::{build_current_project, WasmBuilderSource}; +use substrate_wasm_builder::WasmBuilder; fn main() { - build_current_project( - // The name of the file being generated in out-dir. - "wasm_binary.rs", - // How to include wasm-builder, in this case from crates.io. - WasmBuilderSource::Crates("1.0.0"), - ); + WasmBuilder::new() + // Tell the builder to build the project (crate) this `build.rs` is part of. + .with_current_project() + // Make sure to export the `heap_base` global, this is required by Substrate + .export_heap_base() + // Build the Wasm file so that it imports the memory (need to be provided by at instantiation) + .import_memory() + // Build it. + .build() } ``` @@ -32,9 +35,10 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); ``` This will include the generated Wasm binary as two constants `WASM_BINARY` and `WASM_BINARY_BLOATY`. -The former is a compact Wasm binary and the latter is not compacted. +The former is a compact Wasm binary and the latter is the Wasm binary as being generated by the compiler. +Both variables have `Option<&'static [u8]>` as type. -### Feature +### Features Wasm builder supports to enable cargo features while building the Wasm binary. By default it will enable all features in the wasm build that are enabled for the native build except the @@ -46,19 +50,19 @@ Wasm binary. If this feature is not present, it will not be enabled. By using environment variables, you can configure which Wasm binaries are built and how: -- `SKIP_WASM_BUILD` - Skips building any wasm binary. This is useful when only native should be recompiled. -- `BUILD_DUMMY_WASM_BINARY` - Builds dummy wasm binaries. These dummy binaries are empty and useful - for `cargo check` runs. -- `WASM_BUILD_TYPE` - Sets the build type for building wasm binaries. Supported values are `release` or `debug`. +- `SKIP_WASM_BUILD` - Skips building any Wasm binary. 
This is useful when only native should be recompiled. + If this is the first run and there doesn't exist a Wasm binary, this will set both + variables to `None`. +- `WASM_BUILD_TYPE` - Sets the build type for building Wasm binaries. Supported values are `release` or `debug`. By default the build type is equal to the build type used by the main build. -- `FORCE_WASM_BUILD` - Can be set to force a wasm build. On subsequent calls the value of the variable - needs to change. As wasm builder instructs `cargo` to watch for file changes +- `FORCE_WASM_BUILD` - Can be set to force a Wasm build. On subsequent calls the value of the variable + needs to change. As wasm-builder instructs `cargo` to watch for file changes this environment variable should only be required in certain circumstances. - `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm binary. - `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. -- `WASM_TARGET_DIRECTORY` - Will copy any build wasm binary to the given directory. The path needs +- `WASM_TARGET_DIRECTORY` - Will copy any build Wasm binary to the given directory. The path needs to be absolute. -- `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the wasm binaries. The +- `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the Wasm binaries. The format needs to be the same as used by cargo, e.g. `nightly-2020-02-20`. Each project can be skipped individually by using the environment variable `SKIP_PROJECT_NAME_WASM_BUILD`. diff --git a/utils/wasm-builder/src/builder.rs b/utils/wasm-builder/src/builder.rs new file mode 100644 index 0000000000000000000000000000000000000000..75e1d8057201282174e698a29a84eb7688e5f3b8 --- /dev/null +++ b/utils/wasm-builder/src/builder.rs @@ -0,0 +1,245 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{env, path::{PathBuf, Path}, process}; + +/// Returns the manifest dir from the `CARGO_MANIFEST_DIR` env. +fn get_manifest_dir() -> PathBuf { + env::var("CARGO_MANIFEST_DIR") + .expect("`CARGO_MANIFEST_DIR` is always set for `build.rs` files; qed") + .into() +} + +/// First step of the [`WasmBuilder`] to select the project to build. +pub struct WasmBuilderSelectProject { + /// This parameter just exists to make it impossible to construct + /// this type outside of this crate. + _ignore: (), +} + +impl WasmBuilderSelectProject { + /// Use the current project as project for building the WASM binary. + /// + /// # Panics + /// + /// Panics if the `CARGO_MANIFEST_DIR` variable is not set. This variable + /// is always set by `Cargo` in `build.rs` files. + pub fn with_current_project(self) -> WasmBuilder { + WasmBuilder { + rust_flags: Vec::new(), + file_name: None, + project_cargo_toml: get_manifest_dir().join("Cargo.toml"), + } + } + + /// Use the given `path` as project for building the WASM binary. 
+ /// + /// Returns an error if the given `path` does not points to a `Cargo.toml`. + pub fn with_project( + self, + path: impl Into, + ) -> Result { + let path = path.into(); + + if path.ends_with("Cargo.toml") && path.exists() { + Ok(WasmBuilder { + rust_flags: Vec::new(), + file_name: None, + project_cargo_toml: path, + }) + } else { + Err("Project path must point to the `Cargo.toml` of the project") + } + } +} + +/// The builder for building a wasm binary. +/// +/// The builder itself is separated into multiple structs to make the setup type safe. +/// +/// Building a wasm binary: +/// +/// 1. Call [`WasmBuilder::new`] to create a new builder. +/// 2. Select the project to build using the methods of [`WasmBuilderSelectProject`]. +/// 3. Set additional `RUST_FLAGS` or a different name for the file containing the WASM code +/// using methods of [`WasmBuilder`]. +/// 4. Build the WASM binary using [`Self::build`]. +pub struct WasmBuilder { + /// Flags that should be appended to `RUST_FLAGS` env variable. + rust_flags: Vec, + /// The name of the file that is being generated in `OUT_DIR`. + /// + /// Defaults to `wasm_binary.rs`. + file_name: Option, + /// The path to the `Cargo.toml` of the project that should be built + /// for wasm. + project_cargo_toml: PathBuf, +} + +impl WasmBuilder { + /// Create a new instance of the builder. + pub fn new() -> WasmBuilderSelectProject { + WasmBuilderSelectProject { + _ignore: (), + } + } + + /// Enable exporting `__heap_base` as global variable in the WASM binary. + /// + /// This adds `-Clink-arg=--export=__heap_base` to `RUST_FLAGS`. + pub fn export_heap_base(mut self) -> Self { + self.rust_flags.push("-Clink-arg=--export=__heap_base".into()); + self + } + + /// Set the name of the file that will be generated in `OUT_DIR`. + /// + /// This file needs to be included to get access to the build WASM binary. + /// + /// If this function is not called, `file_name` defaults to `wasm_binary.rs` + pub fn set_file_name(mut self, file_name: impl Into) -> Self { + self.file_name = Some(file_name.into()); + self + } + + /// Instruct the linker to import the memory into the WASM binary. + /// + /// This adds `-C link-arg=--import-memory` to `RUST_FLAGS`. + pub fn import_memory(mut self) -> Self { + self.rust_flags.push("-C link-arg=--import-memory".into()); + self + } + + /// Append the given `flag` to `RUST_FLAGS`. + /// + /// `flag` is appended as is, so it needs to be a valid flag. + pub fn append_to_rust_flags(mut self, flag: impl Into) -> Self { + self.rust_flags.push(flag.into()); + self + } + + /// Build the WASM binary. + pub fn build(self) { + let out_dir = PathBuf::from(env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!")); + let file_path = out_dir.join(self.file_name.unwrap_or_else(|| "wasm_binary.rs".into())); + + if check_skip_build() { + // If we skip the build, we still want to make sure to be called when an env variable + // changes + generate_rerun_if_changed_instructions(); + + provide_dummy_wasm_binary_if_not_exist(&file_path); + + return; + } + + build_project( + file_path, + self.project_cargo_toml, + self.rust_flags.into_iter().map(|f| format!("{} ", f)).collect(), + ); + + // As last step we need to generate our `rerun-if-changed` stuff. If a build fails, we don't + // want to spam the output! + generate_rerun_if_changed_instructions(); + } +} + +/// Generate the name of the skip build environment variable for the current crate. 
+fn generate_crate_skip_build_env_name() -> String { + format!( + "SKIP_{}_WASM_BUILD", + env::var("CARGO_PKG_NAME").expect("Package name is set").to_uppercase().replace('-', "_"), + ) +} + +/// Checks if the build of the WASM binary should be skipped. +fn check_skip_build() -> bool { + env::var(crate::SKIP_BUILD_ENV).is_ok() || env::var(generate_crate_skip_build_env_name()).is_ok() +} + +/// Provide a dummy WASM binary if there doesn't exist one. +fn provide_dummy_wasm_binary_if_not_exist(file_path: &Path) { + if !file_path.exists() { + crate::write_file_if_changed( + file_path, + "pub const WASM_BINARY: Option<&[u8]> = None;\ + pub const WASM_BINARY_BLOATY: Option<&[u8]> = None;", + ); + } +} + +/// Generate the `rerun-if-changed` instructions for cargo to make sure that the WASM binary is +/// rebuilt when needed. +fn generate_rerun_if_changed_instructions() { + // Make sure that the `build.rs` is called again if one of the following env variables changes. + println!("cargo:rerun-if-env-changed={}", crate::SKIP_BUILD_ENV); + println!("cargo:rerun-if-env-changed={}", crate::FORCE_WASM_BUILD_ENV); + println!("cargo:rerun-if-env-changed={}", generate_crate_skip_build_env_name()); +} + +/// Build the currently built project as wasm binary. +/// +/// The current project is determined by using the `CARGO_MANIFEST_DIR` environment variable. +/// +/// `file_name` - The name + path of the file being generated. The file contains the +/// constant `WASM_BINARY`, which contains the built WASM binary. +/// `project_cargo_toml` - The path to the `Cargo.toml` of the project that should be built. +/// `default_rustflags` - Default `RUSTFLAGS` that will always be set for the build. +fn build_project( + file_name: PathBuf, + project_cargo_toml: PathBuf, + default_rustflags: String, +) { + let cargo_cmd = match crate::prerequisites::check() { + Ok(cmd) => cmd, + Err(err_msg) => { + eprintln!("{}", err_msg); + process::exit(1); + }, + }; + + let (wasm_binary, bloaty) = crate::wasm_project::create_and_compile( + &project_cargo_toml, + &default_rustflags, + cargo_cmd, + ); + + let (wasm_binary, wasm_binary_bloaty) = if let Some(wasm_binary) = wasm_binary { + ( + wasm_binary.wasm_binary_path_escaped(), + bloaty.wasm_binary_bloaty_path_escaped(), + ) + } else { + ( + bloaty.wasm_binary_bloaty_path_escaped(), + bloaty.wasm_binary_bloaty_path_escaped(), + ) + }; + + crate::write_file_if_changed( + file_name, + format!( + r#" + pub const WASM_BINARY: Option<&[u8]> = Some(include_bytes!("{wasm_binary}")); + pub const WASM_BINARY_BLOATY: Option<&[u8]> = Some(include_bytes!("{wasm_binary_bloaty}")); + "#, + wasm_binary = wasm_binary, + wasm_binary_bloaty = wasm_binary_bloaty, + ), + ); +} diff --git a/utils/wasm-builder/src/lib.rs b/utils/wasm-builder/src/lib.rs index aa63e9596e19086d63691fe50fc97c5f8e245724..573afbfcb6dc552cfcc8624864725d346bf353a3 100644 --- a/utils/wasm-builder/src/lib.rs +++ b/utils/wasm-builder/src/lib.rs @@ -25,20 +25,23 @@ //! A project that should be compiled as a Wasm binary needs to: //! //! 1. Add a `build.rs` file. -//! 2. Add `substrate-wasm-builder` as dependency into `build-dependencies`. +//! 2. Add `wasm-builder` as dependency into `build-dependencies`. //! //! The `build.rs` file needs to contain the following code: //! -//! ```ignore -//! use wasm_builder_runner::{build_current_project, WasmBuilderSource}; +//! ```no_run +//! use substrate_wasm_builder::WasmBuilder; //! //! fn main() { -//! build_current_project( -//! // The name of the file being generated in out-dir. -//! 
"wasm_binary.rs", -//! // How to include wasm-builder, in this case from crates.io. -//! WasmBuilderSource::Crates("1.0.0"), -//! ); +//! WasmBuilder::new() +//! // Tell the builder to build the project (crate) this `build.rs` is part of. +//! .with_current_project() +//! // Make sure to export the `heap_base` global, this is required by Substrate +//! .export_heap_base() +//! // Build the Wasm file so that it imports the memory (need to be provided by at instantiation) +//! .import_memory() +//! // Build it. +//! .build() //! } //! ``` //! @@ -49,7 +52,8 @@ //! ``` //! //! This will include the generated Wasm binary as two constants `WASM_BINARY` and `WASM_BINARY_BLOATY`. -//! The former is a compact Wasm binary and the latter is not compacted. +//! The former is a compact Wasm binary and the latter is the Wasm binary as being generated by the compiler. +//! Both variables have `Option<&'static [u8]>` as type. //! //! ### Feature //! @@ -63,19 +67,19 @@ //! //! By using environment variables, you can configure which Wasm binaries are built and how: //! -//! - `SKIP_WASM_BUILD` - Skips building any wasm binary. This is useful when only native should be recompiled. -//! - `BUILD_DUMMY_WASM_BINARY` - Builds dummy wasm binaries. These dummy binaries are empty and useful -//! for `cargo check` runs. -//! - `WASM_BUILD_TYPE` - Sets the build type for building wasm binaries. Supported values are `release` or `debug`. +//! - `SKIP_WASM_BUILD` - Skips building any Wasm binary. This is useful when only native should be recompiled. +//! If this is the first run and there doesn't exist a Wasm binary, this will set both +//! variables to `None`. +//! - `WASM_BUILD_TYPE` - Sets the build type for building Wasm binaries. Supported values are `release` or `debug`. //! By default the build type is equal to the build type used by the main build. -//! - `FORCE_WASM_BUILD` - Can be set to force a wasm build. On subsequent calls the value of the variable -//! needs to change. As wasm builder instructs `cargo` to watch for file changes +//! - `FORCE_WASM_BUILD` - Can be set to force a Wasm build. On subsequent calls the value of the variable +//! needs to change. As wasm-builder instructs `cargo` to watch for file changes //! this environment variable should only be required in certain circumstances. //! - `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm binary. //! - `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. -//! - `WASM_TARGET_DIRECTORY` - Will copy any build wasm binary to the given directory. The path needs +//! - `WASM_TARGET_DIRECTORY` - Will copy any build Wasm binary to the given directory. The path needs //! to be absolute. -//! - `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the wasm binaries. The +//! - `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the Wasm binaries. The //! format needs to be the same as used by cargo, e.g. `nightly-2020-02-20`. //! //! Each project can be skipped individually by using the environment variable `SKIP_PROJECT_NAME_WASM_BUILD`. @@ -92,11 +96,14 @@ //! as well. For example if installing the rust nightly from 20.02.2020 using `rustup install nightly-2020-02-20`, //! the wasm target needs to be installed as well `rustup target add wasm32-unknown-unknown --toolchain nightly-2020-02-20`. 
-use std::{env, fs, path::{PathBuf, Path}, process::{Command, self}, io::BufRead}; +use std::{env, fs, path::{PathBuf, Path}, process::Command, io::BufRead}; +mod builder; mod prerequisites; mod wasm_project; +pub use builder::{WasmBuilder, WasmBuilderSelectProject}; + /// Environment variable that tells us to skip building the wasm binary. const SKIP_BUILD_ENV: &str = "SKIP_WASM_BUILD"; @@ -120,87 +127,8 @@ const WASM_BUILD_NO_COLOR: &str = "WASM_BUILD_NO_COLOR"; /// Environment variable to set the toolchain used to compile the wasm binary. const WASM_BUILD_TOOLCHAIN: &str = "WASM_BUILD_TOOLCHAIN"; -/// Build the currently built project as wasm binary. -/// -/// The current project is determined by using the `CARGO_MANIFEST_DIR` environment variable. -/// -/// `file_name` - The name + path of the file being generated. The file contains the -/// constant `WASM_BINARY`, which contains the built WASM binary. -/// `cargo_manifest` - The path to the `Cargo.toml` of the project that should be built. -pub fn build_project(file_name: &str, cargo_manifest: &str) { - build_project_with_default_rustflags(file_name, cargo_manifest, ""); -} - -/// Build the currently built project as wasm binary. -/// -/// The current project is determined by using the `CARGO_MANIFEST_DIR` environment variable. -/// -/// `file_name` - The name + path of the file being generated. The file contains the -/// constant `WASM_BINARY`, which contains the built WASM binary. -/// `cargo_manifest` - The path to the `Cargo.toml` of the project that should be built. -/// `default_rustflags` - Default `RUSTFLAGS` that will always be set for the build. -pub fn build_project_with_default_rustflags( - file_name: &str, - cargo_manifest: &str, - default_rustflags: &str, -) { - if check_skip_build() { - return; - } - - let cargo_manifest = PathBuf::from(cargo_manifest); - - if !cargo_manifest.exists() { - panic!("'{}' does not exist!", cargo_manifest.display()); - } - - if !cargo_manifest.ends_with("Cargo.toml") { - panic!("'{}' no valid path to a `Cargo.toml`!", cargo_manifest.display()); - } - - let cargo_cmd = match prerequisites::check() { - Ok(cmd) => cmd, - Err(err_msg) => { - eprintln!("{}", err_msg); - process::exit(1); - }, - }; - - let (wasm_binary, bloaty) = wasm_project::create_and_compile( - &cargo_manifest, - default_rustflags, - cargo_cmd, - ); - - let (wasm_binary, wasm_binary_bloaty) = if let Some(wasm_binary) = wasm_binary { - ( - wasm_binary.wasm_binary_path_escaped(), - bloaty.wasm_binary_bloaty_path_escaped(), - ) - } else { - ( - bloaty.wasm_binary_bloaty_path_escaped(), - bloaty.wasm_binary_bloaty_path_escaped(), - ) - }; - - write_file_if_changed( - file_name, - format!( - r#" - pub const WASM_BINARY: Option<&[u8]> = Some(include_bytes!("{wasm_binary}")); - pub const WASM_BINARY_BLOATY: Option<&[u8]> = Some(include_bytes!("{wasm_binary_bloaty}")); - "#, - wasm_binary = wasm_binary, - wasm_binary_bloaty = wasm_binary_bloaty, - ), - ); -} - -/// Checks if the build of the WASM binary should be skipped. -fn check_skip_build() -> bool { - env::var(SKIP_BUILD_ENV).is_ok() -} +/// Environment variable that makes sure the WASM build is triggered. +const FORCE_WASM_BUILD_ENV: &str = "FORCE_WASM_BUILD"; /// Write to the given `file` if the `content` is different. 
 fn write_file_if_changed(file: impl AsRef<Path>, content: impl AsRef<str>) {
@@ -217,7 +145,9 @@ fn copy_file_if_changed(src: PathBuf, dst: PathBuf) {
 
 	if src_file != dst_file {
 		fs::copy(&src, &dst)
-			.unwrap_or_else(|_| panic!("Copying `{}` to `{}` can not fail; qed", src.display(), dst.display()));
+			.unwrap_or_else(
+				|_| panic!("Copying `{}` to `{}` can not fail; qed", src.display(), dst.display())
+			);
 	}
 }
 
diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs
index c27af71988b0716fab57d57d985d180be626811c..4c4c80e5a86642d155fc6f5b369598b22bf9e371 100644
--- a/utils/wasm-builder/src/wasm_project.rs
+++ b/utils/wasm-builder/src/wasm_project.rs
@@ -30,10 +30,6 @@ use cargo_metadata::{MetadataCommand, Metadata};
 
 use walkdir::WalkDir;
 
-use fs2::FileExt;
-
-use itertools::Itertools;
-
 /// Colorize an info message.
 ///
 /// Returns the colorized message.
@@ -70,31 +66,6 @@ impl WasmBinary {
 	}
 }
 
-/// A lock for the WASM workspace.
-struct WorkspaceLock(fs::File);
-
-impl WorkspaceLock {
-	/// Create a new lock
-	fn new(wasm_workspace_root: &Path) -> Self {
-		let lock = fs::OpenOptions::new()
-			.read(true)
-			.write(true)
-			.create(true)
-			.open(wasm_workspace_root.join("wasm_workspace.lock"))
-			.expect("Opening the lock file does not fail");
-
-		lock.lock_exclusive().expect("Locking `wasm_workspace.lock` failed");
-
-		WorkspaceLock(lock)
-	}
-}
-
-impl Drop for WorkspaceLock {
-	fn drop(&mut self) {
-		let _ = self.0.unlock();
-	}
-}
-
 fn crate_metadata(cargo_manifest: &Path) -> Metadata {
 	let mut cargo_lock = cargo_manifest.to_path_buf();
 	cargo_lock.set_file_name("Cargo.lock");
@@ -120,35 +91,36 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata {
 /// Creates the WASM project, compiles the WASM binary and compacts the WASM binary.
 ///
 /// # Returns
+///
 /// The path to the compact WASM binary and the bloaty WASM binary.
 pub(crate) fn create_and_compile(
-	cargo_manifest: &Path,
+	project_cargo_toml: &Path,
 	default_rustflags: &str,
 	cargo_cmd: CargoCommandVersioned,
 ) -> (Option<WasmBinary>, WasmBinaryBloaty) {
 	let wasm_workspace_root = get_wasm_workspace_root();
 	let wasm_workspace = wasm_workspace_root.join("wbuild");
 
-	// Lock the workspace exclusively for us
-	let _lock = WorkspaceLock::new(&wasm_workspace_root);
+	let crate_metadata = crate_metadata(project_cargo_toml);
 
-	let crate_metadata = crate_metadata(cargo_manifest);
-
-	let project = create_project(cargo_manifest, &wasm_workspace, &crate_metadata);
-	create_wasm_workspace_project(&wasm_workspace, &crate_metadata.workspace_root);
+	let project = create_project(
+		project_cargo_toml,
+		&wasm_workspace,
+		&crate_metadata,
+		&crate_metadata.workspace_root,
+	);
 
 	build_project(&project, default_rustflags, cargo_cmd);
 
 	let (wasm_binary, bloaty) = compact_wasm_file(
 		&project,
-		cargo_manifest,
-		&wasm_workspace,
+		project_cargo_toml,
 	);
 
 	wasm_binary.as_ref().map(|wasm_binary|
-		copy_wasm_to_target_directory(cargo_manifest, wasm_binary)
+		copy_wasm_to_target_directory(project_cargo_toml, wasm_binary)
 	);
 
-	generate_rerun_if_changed_instructions(cargo_manifest, &project, &wasm_workspace);
+	generate_rerun_if_changed_instructions(project_cargo_toml, &project, &wasm_workspace);
 
 	(wasm_binary, bloaty)
 }
@@ -221,69 +193,14 @@ fn get_wasm_workspace_root() -> PathBuf {
 	panic!("Could not find target dir in: {}", build_helper::out_dir().display())
 }
 
-/// Find all workspace members.
-///
-/// Each folder in `wasm_workspace` is seen as a member of the workspace. Exceptions are
-/// folders starting with "." and the "target" folder.
-///
-/// Every workspace member that is not valid anymore is deleted (the folder of it). A
-/// member is not valid anymore when the `wasm-project` dependency points to an non-existing
-/// folder or the package name is not valid.
-fn find_and_clear_workspace_members(wasm_workspace: &Path) -> Vec<String> {
-	let mut members = WalkDir::new(wasm_workspace)
-		.min_depth(1)
-		.max_depth(1)
-		.into_iter()
-		.filter_map(|p| p.ok())
-		.map(|d| d.into_path())
-		.filter(|p| p.is_dir())
-		.filter_map(|p| p.file_name().map(|f| f.to_owned()).and_then(|s| s.into_string().ok()))
-		.filter(|f| !f.starts_with('.') && f != "target")
-		.collect::<Vec<_>>();
-
-	let mut i = 0;
-	while i != members.len() {
-		let path = wasm_workspace.join(&members[i]).join("Cargo.toml");
-
-		// Extract the `wasm-project` dependency.
-		// If the path can be extracted and is valid and the package name matches,
-		// the member is valid.
-		if let Some(mut wasm_project) = fs::read_to_string(path)
-			.ok()
-			.and_then(|s| toml::from_str::<Table>(&s).ok())
-			.and_then(|mut t| t.remove("dependencies"))
-			.and_then(|p| p.try_into::<Table>().ok())
-			.and_then(|mut t| t.remove("wasm_project"))
-			.and_then(|p| p.try_into::<Table>().ok())
-		{
-			if let Some(path) = wasm_project.remove("path")
-				.and_then(|p| p.try_into::<String>().ok())
-			{
-				if let Some(name) = wasm_project.remove("package")
-					.and_then(|p| p.try_into::<String>().ok())
-				{
-					let path = PathBuf::from(path);
-					if path.exists() {
-						if name == get_crate_name(&path.join("Cargo.toml")) {
-							i += 1;
-							continue
-						}
-					}
-				}
-			}
-		}
-
-		fs::remove_dir_all(wasm_workspace.join(&members[i]))
-			.expect("Removing invalid workspace member can not fail; qed");
-		members.remove(i);
-	}
-
-	members
-}
-
-fn create_wasm_workspace_project(wasm_workspace: &Path, workspace_root_path: &Path) {
-	let members = find_and_clear_workspace_members(wasm_workspace);
-
+fn create_project_cargo_toml(
+	wasm_workspace: &Path,
+	workspace_root_path: &Path,
+	crate_name: &str,
+	crate_path: &Path,
+	wasm_binary: &str,
+	enabled_features: &[String],
+) {
 	let mut workspace_toml: Table = toml::from_str(
 		&fs::read_to_string(
 			workspace_root_path.join("Cargo.toml"),
@@ -306,12 +223,6 @@ fn create_wasm_workspace_project(wasm_workspace: &Path, workspace_root_path: &Pa
 
 	wasm_workspace_toml.insert("profile".into(), profile.into());
 
-	// Add `workspace` with members
-	let mut workspace = Table::new();
-	workspace.insert("members".into(), members.into());
-
-	wasm_workspace_toml.insert("workspace".into(), workspace.into());
-
 	// Add patch section from the project root `Cargo.toml`
 	if let Some(mut patch) = workspace_toml.remove("patch").and_then(|p| p.try_into::<Table>().ok()) {
 		// Iterate over all patches and make the patch path absolute from the workspace root path.
@@ -335,6 +246,33 @@ fn create_wasm_workspace_project(wasm_workspace: &Path, workspace_root_path: &Pa
 		wasm_workspace_toml.insert("patch".into(), patch.into());
 	}
 
+	let mut package = Table::new();
+	package.insert("name".into(), format!("{}-wasm", crate_name).into());
+	package.insert("version".into(), "1.0.0".into());
+	package.insert("edition".into(), "2018".into());
+
+	wasm_workspace_toml.insert("package".into(), package.into());
+
+	let mut lib = Table::new();
+	lib.insert("name".into(), wasm_binary.into());
+	lib.insert("crate-type".into(), vec!["cdylib".to_string()].into());
+
+	wasm_workspace_toml.insert("lib".into(), lib.into());
+
+	let mut dependencies = Table::new();
+
+	let mut wasm_project = Table::new();
+	wasm_project.insert("package".into(), crate_name.into());
+	wasm_project.insert("path".into(), crate_path.display().to_string().into());
+	wasm_project.insert("default-features".into(), false.into());
+	wasm_project.insert("features".into(), enabled_features.to_vec().into());
+
+	dependencies.insert("wasm-project".into(), wasm_project.into());
+
+	wasm_workspace_toml.insert("dependencies".into(), dependencies.into());
+
+	wasm_workspace_toml.insert("workspace".into(), Table::new().into());
+
 	write_file_if_changed(
 		wasm_workspace.join("Cargo.toml"),
 		toml::to_string_pretty(&wasm_workspace_toml).expect("Wasm workspace toml is valid; qed"),
@@ -394,56 +332,48 @@ fn has_runtime_wasm_feature_declared(
 /// Create the project used to build the wasm binary.
 ///
 /// # Returns
-/// The path to the created project.
-fn create_project(cargo_manifest: &Path, wasm_workspace: &Path, crate_metadata: &Metadata) -> PathBuf {
-	let crate_name = get_crate_name(cargo_manifest);
-	let crate_path = cargo_manifest.parent().expect("Parent path exists; qed");
-	let wasm_binary = get_wasm_binary_name(cargo_manifest);
-	let project_folder = wasm_workspace.join(&crate_name);
-
-	fs::create_dir_all(project_folder.join("src"))
+///
+/// The path to the created wasm project.
+fn create_project( + project_cargo_toml: &Path, + wasm_workspace: &Path, + crate_metadata: &Metadata, + workspace_root_path: &Path, +) -> PathBuf { + let crate_name = get_crate_name(project_cargo_toml); + let crate_path = project_cargo_toml.parent().expect("Parent path exists; qed"); + let wasm_binary = get_wasm_binary_name(project_cargo_toml); + let wasm_project_folder = wasm_workspace.join(&crate_name); + + fs::create_dir_all(wasm_project_folder.join("src")) .expect("Wasm project dir create can not fail; qed"); - let mut enabled_features = project_enabled_features(&cargo_manifest, &crate_metadata); + let mut enabled_features = project_enabled_features(&project_cargo_toml, &crate_metadata); - if has_runtime_wasm_feature_declared(cargo_manifest, crate_metadata) { + if has_runtime_wasm_feature_declared(project_cargo_toml, crate_metadata) { enabled_features.push("runtime-wasm".into()); } - write_file_if_changed( - project_folder.join("Cargo.toml"), - format!( - r#" - [package] - name = "{crate_name}-wasm" - version = "1.0.0" - edition = "2018" - - [lib] - name = "{wasm_binary}" - crate-type = ["cdylib"] - - [dependencies] - wasm_project = {{ package = "{crate_name}", path = "{crate_path}", default-features = false, features = [ {features} ] }} - "#, - crate_name = crate_name, - crate_path = crate_path.display(), - wasm_binary = wasm_binary, - features = enabled_features.into_iter().map(|f| format!("\"{}\"", f)).join(","), - ) + create_project_cargo_toml( + &wasm_project_folder, + workspace_root_path, + &crate_name, + &crate_path, + &wasm_binary, + &enabled_features, ); write_file_if_changed( - project_folder.join("src/lib.rs"), + wasm_project_folder.join("src/lib.rs"), "#![no_std] pub use wasm_project::*;", ); - if let Some(crate_lock_file) = find_cargo_lock(cargo_manifest) { + if let Some(crate_lock_file) = find_cargo_lock(project_cargo_toml) { // Use the `Cargo.lock` of the main project. - crate::copy_file_if_changed(crate_lock_file, wasm_workspace.join("Cargo.lock")); + crate::copy_file_if_changed(crate_lock_file, wasm_project_folder.join("Cargo.lock")); } - project_folder + wasm_project_folder } /// Returns if the project should be built as a release. @@ -474,9 +404,13 @@ fn build_project(project: &Path, default_rustflags: &str, cargo_cmd: CargoComman env::var(crate::WASM_BUILD_RUSTFLAGS_ENV).unwrap_or_default(), ); - build_cmd.args(&["rustc", "--target=wasm32-unknown-unknown"]) + build_cmd.args(&["-Zfeatures=build_dep", "rustc", "--target=wasm32-unknown-unknown"]) .arg(format!("--manifest-path={}", manifest_path.display())) .env("RUSTFLAGS", rustflags) + // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock (cargo locks a target dir exclusive). + // The runner project is created in `CARGO_TARGET_DIR` and executing it will create a sub target + // directory inside of `CARGO_TARGET_DIR`. 
+ .env_remove("CARGO_TARGET_DIR") // We don't want to call ourselves recursively .env(crate::SKIP_BUILD_ENV, ""); @@ -503,14 +437,14 @@ fn build_project(project: &Path, default_rustflags: &str, cargo_cmd: CargoComman fn compact_wasm_file( project: &Path, cargo_manifest: &Path, - wasm_workspace: &Path, ) -> (Option, WasmBinaryBloaty) { let is_release_build = is_release_build(); let target = if is_release_build { "release" } else { "debug" }; let wasm_binary = get_wasm_binary_name(cargo_manifest); - let wasm_file = wasm_workspace.join("target/wasm32-unknown-unknown") + let wasm_file = project.join("target/wasm32-unknown-unknown") .join(target) .join(format!("{}.wasm", wasm_binary)); + let wasm_compact_file = if is_release_build { let wasm_compact_file = project.join(format!("{}.compact.wasm", wasm_binary)); wasm_gc::garbage_collect_file(&wasm_file, &wasm_compact_file)